From 1816e3cc9bef5e2321505f1d2b087fe90996dad5 Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Tue, 30 Apr 2024 11:57:13 -0600 Subject: [PATCH 01/90] Version: bump 5.2.0-SNAPSHOT --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index af5bb19bcb2..aa0dd05ed38 100644 --- a/build.gradle +++ b/build.gradle @@ -74,7 +74,7 @@ configure(coreProjects) { apply plugin: 'idea' group = 'org.mongodb' - version = '5.1.0' + version = '5.2.0-SNAPSHOT' repositories { mavenLocal() From 330d3b172edad4d9072552127937bd51493e1365 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Fri, 3 May 2024 10:22:30 -0400 Subject: [PATCH 02/90] Remove support for MongoDB 3.6 (#1375) * Remove branching code in the driver based on 3.6 version checks * Remove testing of 3.6 * Clean up tests JAVA-5294 --- .evergreen/.evg.yml | 8 +--- .../mongodb/connection/ServerDescription.java | 2 +- .../internal/connection/CommandMessage.java | 11 ++---- .../connection/DefaultAuthenticator.java | 25 ++++--------- .../operation/ServerVersionHelper.java | 20 +--------- .../com/mongodb/ClusterFixture.java | 14 ++----- .../client/CommandMonitoringTestHelper.java | 9 ++--- .../rs/compatible.json | 2 +- .../rs/compatible_unknown.json | 2 +- .../sharded/compatible.json | 2 +- .../single/compatible.json | 2 +- .../single/too_old_then_upgraded.json | 4 +- .../CommandMessageSpecification.groovy | 37 ++++--------------- ...ternalStreamConnectionSpecification.groovy | 4 +- ...gingCommandEventSenderSpecification.groovy | 10 ++--- .../connection/TestInternalConnection.java | 4 +- .../X509AuthenticatorNoUserNameTest.java | 4 +- .../main/com/mongodb/MapReduceCommand.java | 2 +- .../mongodb/DBCollectionAggregationTest.java | 3 -- .../functional/com/mongodb/DBCursorTest.java | 9 +---- .../test/functional/com/mongodb/DBTest.java | 2 - .../functional/com/mongodb/MapReduceTest.java | 3 -- .../client/BatchCursorPublisherErrorTest.java | 3 -- .../client/ChangeStreamsCancellationTest.java | 3 +- .../client/ReadConcernTest.java | 7 ---- .../client/RetryableWritesProseTest.java | 3 +- .../documentation/DocumentationSamples.java | 8 +--- .../mongodb/client/AbstractExplainTest.java | 3 -- .../client/AbstractSessionsProseTest.java | 2 - .../mongodb/client/ChangeStreamProseTest.java | 2 +- .../com/mongodb/client/CrudProseTest.java | 2 - .../com/mongodb/client/ReadConcernTest.java | 8 ---- .../client/RetryableWritesProseTest.java | 2 +- 33 files changed, 55 insertions(+), 167 deletions(-) diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 886282b77c4..58369f23a59 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -1802,10 +1802,6 @@ axes: display_name: "4.0" variables: VERSION: "4.0" - - id: "3.6" - display_name: "3.6" - variables: - VERSION: "3.6" - id: os display_name: OS values: @@ -2223,7 +2219,7 @@ buildvariants: - name: "test" - matrix_name: "tests-jdk8-unsecure" - matrix_spec: { auth: "noauth", ssl: "nossl", jdk: "jdk8", version: ["3.6", "4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "latest"], + matrix_spec: { auth: "noauth", ssl: "nossl", jdk: "jdk8", version: ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "latest"], topology: "*", os: "linux" } display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " tags: ["tests-variant"] @@ -2232,7 +2228,7 @@ buildvariants: - matrix_name: "tests-jdk-secure" matrix_spec: { auth: "auth", ssl: "ssl", jdk: [ "jdk8", "jdk17", "jdk21"], - version: [ "3.6", "4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "latest" ], + version: ["4.0", "4.2", "4.4", 
"5.0", "6.0", "7.0", "latest" ], topology: "*", os: "linux" } display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " tags: ["tests-variant"] diff --git a/driver-core/src/main/com/mongodb/connection/ServerDescription.java b/driver-core/src/main/com/mongodb/connection/ServerDescription.java index fbc59cc944f..1bf0a037924 100644 --- a/driver-core/src/main/com/mongodb/connection/ServerDescription.java +++ b/driver-core/src/main/com/mongodb/connection/ServerDescription.java @@ -58,7 +58,7 @@ public class ServerDescription { * The minimum supported driver wire version * @since 3.8 */ - public static final int MIN_DRIVER_WIRE_VERSION = 6; + public static final int MIN_DRIVER_WIRE_VERSION = 7; /** * The maximum supported driver wire version * @since 3.8 diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java index f9ca361778f..24b30d60acb 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java @@ -49,7 +49,6 @@ import static com.mongodb.internal.connection.ReadConcernHelper.getReadConcernDocument; import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_TWO_WIRE_VERSION; import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION; -import static com.mongodb.internal.operation.ServerVersionHelper.THREE_DOT_SIX_WIRE_VERSION; /** * A command message that uses OP_MSG or OP_QUERY to send the command. @@ -270,9 +269,7 @@ private void addServerApiElements(final List extraElements) { } private void checkServerVersionForTransactionSupport() { - int wireVersion = getSettings().getMaxWireVersion(); - if (wireVersion < FOUR_DOT_ZERO_WIRE_VERSION - || (wireVersion < FOUR_DOT_TWO_WIRE_VERSION && getSettings().getServerType() == SHARD_ROUTER)) { + if (getSettings().getMaxWireVersion() < FOUR_DOT_TWO_WIRE_VERSION && getSettings().getServerType() == SHARD_ROUTER) { throw new MongoClientException("Transactions are not supported by the MongoDB cluster to which this client is connected."); } } @@ -287,12 +284,12 @@ private void addReadConcernDocument(final List extraElements, final private static OpCode getOpCode(final MessageSettings settings, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { - return isServerVersionAtLeastThreeDotSix(settings) || clusterConnectionMode == LOAD_BALANCED || serverApi != null + return isServerVersionKnown(settings) || clusterConnectionMode == LOAD_BALANCED || serverApi != null ? 
OpCode.OP_MSG : OpCode.OP_QUERY; } - private static boolean isServerVersionAtLeastThreeDotSix(final MessageSettings settings) { - return settings.getMaxWireVersion() >= THREE_DOT_SIX_WIRE_VERSION; + private static boolean isServerVersionKnown(final MessageSettings settings) { + return settings.getMaxWireVersion() >= FOUR_DOT_ZERO_WIRE_VERSION; } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java index 86b081b621d..13e7ec09a16 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java @@ -32,7 +32,6 @@ import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_256; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrueArgument; -import static com.mongodb.internal.operation.ServerVersionHelper.serverIsLessThanVersionFourDotZero; import static java.lang.String.format; class DefaultAuthenticator extends Authenticator implements SpeculativeAuthenticator { @@ -48,29 +47,19 @@ class DefaultAuthenticator extends Authenticator implements SpeculativeAuthentic @Override void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) { - if (serverIsLessThanVersionFourDotZero(connectionDescription)) { - new ScramShaAuthenticator(getMongoCredentialWithCache().withMechanism(SCRAM_SHA_1), getClusterConnectionMode(), getServerApi()) - .authenticate(connection, connectionDescription); - } else { - try { - setDelegate(connectionDescription); - delegate.authenticate(connection, connectionDescription); - } catch (Exception e) { - throw wrapException(e); - } + try { + setDelegate(connectionDescription); + delegate.authenticate(connection, connectionDescription); + } catch (Exception e) { + throw wrapException(e); } } @Override void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, final SingleResultCallback callback) { - if (serverIsLessThanVersionFourDotZero(connectionDescription)) { - new ScramShaAuthenticator(getMongoCredentialWithCache().withMechanism(SCRAM_SHA_1), getClusterConnectionMode(), getServerApi()) - .authenticateAsync(connection, connectionDescription, callback); - } else { - setDelegate(connectionDescription); - delegate.authenticateAsync(connection, connectionDescription, callback); - } + setDelegate(connectionDescription); + delegate.authenticateAsync(connection, connectionDescription, callback); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java b/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java index 1c95774a68f..68a03410832 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java @@ -25,34 +25,18 @@ public final class ServerVersionHelper { public static final int MIN_WIRE_VERSION = 0; - public static final int THREE_DOT_SIX_WIRE_VERSION = 6; public static final int FOUR_DOT_ZERO_WIRE_VERSION = 7; public static final int FOUR_DOT_TWO_WIRE_VERSION = 8; public static final int FOUR_DOT_FOUR_WIRE_VERSION = 9; public static final int FIVE_DOT_ZERO_WIRE_VERSION = 12; public static final int SIX_DOT_ZERO_WIRE_VERSION = 17; - private static final int SEVEN_DOT_ZERO_WIRE_VERSION = 21; - - public static boolean 
serverIsAtLeastVersionFourDotZero(final ConnectionDescription description) { - return description.getMaxWireVersion() >= FOUR_DOT_ZERO_WIRE_VERSION; - } - - public static boolean serverIsAtLeastVersionFourDotTwo(final ConnectionDescription description) { - return description.getMaxWireVersion() >= FOUR_DOT_TWO_WIRE_VERSION; - } + public static final int SEVEN_DOT_ZERO_WIRE_VERSION = 21; + public static final int LATEST_WIRE_VERSION = SEVEN_DOT_ZERO_WIRE_VERSION; public static boolean serverIsAtLeastVersionFourDotFour(final ConnectionDescription description) { return description.getMaxWireVersion() >= FOUR_DOT_FOUR_WIRE_VERSION; } - public static boolean serverIsAtLeastVersionFiveDotZero(final ConnectionDescription description) { - return description.getMaxWireVersion() >= FIVE_DOT_ZERO_WIRE_VERSION; - } - - public static boolean serverIsLessThanVersionFourDotZero(final ConnectionDescription description) { - return description.getMaxWireVersion() < FOUR_DOT_ZERO_WIRE_VERSION; - } - public static boolean serverIsLessThanVersionFourDotTwo(final ConnectionDescription description) { return description.getMaxWireVersion() < FOUR_DOT_TWO_WIRE_VERSION; } diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java index 934c83f113b..920a2c2ac09 100644 --- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java +++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java @@ -330,11 +330,8 @@ public static ReadWriteBinding getBinding(final ReadPreference readPreference) { private static ReadWriteBinding getBinding(final Cluster cluster, final ReadPreference readPreference) { if (!BINDING_MAP.containsKey(readPreference)) { - ReadWriteBinding binding = new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE); - if (serverVersionAtLeast(3, 6)) { - binding = new SessionBinding(binding); - } + ReadWriteBinding binding = new SessionBinding(new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, getServerApi(), + IgnorableRequestContext.INSTANCE)); BINDING_MAP.put(readPreference, binding); } return BINDING_MAP.get(readPreference); @@ -367,11 +364,8 @@ public static AsyncReadWriteBinding getAsyncBinding(final ReadPreference readPre public static AsyncReadWriteBinding getAsyncBinding(final Cluster cluster, final ReadPreference readPreference) { if (!ASYNC_BINDING_MAP.containsKey(readPreference)) { - AsyncReadWriteBinding binding = new AsyncClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE); - if (serverVersionAtLeast(3, 6)) { - binding = new AsyncSessionBinding(binding); - } + AsyncReadWriteBinding binding = new AsyncSessionBinding(new AsyncClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, + getServerApi(), IgnorableRequestContext.INSTANCE)); ASYNC_BINDING_MAP.put(readPreference, binding); } return ASYNC_BINDING_MAP.get(readPreference); diff --git a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java index 8ba3a5b3851..4c045001b10 100644 --- a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java +++ b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java @@ -43,7 +43,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; 
import static com.mongodb.client.CrudTestHelper.replaceTypeAssertionWithActual; import static java.util.Arrays.asList; import static org.junit.Assert.assertEquals; @@ -90,11 +89,9 @@ public static List getExpectedEvents(final BsonArray expectedEvent } // Not clear whether these global fields should be included, but also not clear how to efficiently exclude them - if (serverVersionAtLeast(3, 6)) { - commandDocument.put("$db", new BsonString(actualDatabaseName)); - if (operation != null && operation.containsKey("read_preference")) { - commandDocument.put("$readPreference", operation.getDocument("read_preference")); - } + commandDocument.put("$db", new BsonString(actualDatabaseName)); + if (operation != null && operation.containsKey("read_preference")) { + commandDocument.put("$readPreference", operation.getDocument("read_preference")); } commandEvent = new CommandStartedEvent(null, 1, 1, null, actualDatabaseName, commandName, commandDocument); diff --git a/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible.json b/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible.json index 444b13e9d57..dfd5d57dfab 100644 --- a/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible.json +++ b/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible_unknown.json b/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible_unknown.json index cf92dd1ed35..95e03ea958e 100644 --- a/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible_unknown.json +++ b/driver-core/src/test/resources/server-discovery-and-monitoring/rs/compatible_unknown.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/driver-core/src/test/resources/server-discovery-and-monitoring/sharded/compatible.json b/driver-core/src/test/resources/server-discovery-and-monitoring/sharded/compatible.json index e531db97f9f..ceb0ec24c4c 100644 --- a/driver-core/src/test/resources/server-discovery-and-monitoring/sharded/compatible.json +++ b/driver-core/src/test/resources/server-discovery-and-monitoring/sharded/compatible.json @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/driver-core/src/test/resources/server-discovery-and-monitoring/single/compatible.json b/driver-core/src/test/resources/server-discovery-and-monitoring/single/compatible.json index 302927598ca..493d9b748e6 100644 --- a/driver-core/src/test/resources/server-discovery-and-monitoring/single/compatible.json +++ b/driver-core/src/test/resources/server-discovery-and-monitoring/single/compatible.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/driver-core/src/test/resources/server-discovery-and-monitoring/single/too_old_then_upgraded.json b/driver-core/src/test/resources/server-discovery-and-monitoring/single/too_old_then_upgraded.json index 58ae7d9de40..c3dd98cf62e 100644 --- a/driver-core/src/test/resources/server-discovery-and-monitoring/single/too_old_then_upgraded.json +++ b/driver-core/src/test/resources/server-discovery-and-monitoring/single/too_old_then_upgraded.json @@ -1,5 +1,5 @@ { - 
"description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 6", + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 21", "uri": "mongodb://a", "phases": [ { @@ -35,7 +35,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy index 12d22e31fd1..edc6e92c30e 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy @@ -43,7 +43,7 @@ import java.nio.ByteBuffer import static com.mongodb.internal.connection.SplittablePayload.Type.INSERT import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION -import static com.mongodb.internal.operation.ServerVersionHelper.THREE_DOT_SIX_WIRE_VERSION +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION class CommandMessageSpecification extends Specification { @@ -55,7 +55,7 @@ class CommandMessageSpecification extends Specification { given: def message = new CommandMessage(namespace, command, fieldNameValidator, readPreference, MessageSettings.builder() - .maxWireVersion(THREE_DOT_SIX_WIRE_VERSION) + .maxWireVersion(LATEST_WIRE_VERSION) .serverType(serverType as ServerType) .sessionSupported(true) .build(), @@ -148,9 +148,7 @@ class CommandMessageSpecification extends Specification { def expectedCommandDocument = new BsonDocument('insert', new BsonString('coll')).append('documents', new BsonArray([new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))])) - if (maxWireVersion == THREE_DOT_SIX_WIRE_VERSION) { - expectedCommandDocument.append('$db', new BsonString(namespace.getDatabaseName())) - } + expectedCommandDocument.append('$db', new BsonString(namespace.getDatabaseName())) then: commandDocument == expectedCommandDocument @@ -158,14 +156,14 @@ class CommandMessageSpecification extends Specification { where: [maxWireVersion, originalCommandDocument, payload] << [ [ - THREE_DOT_SIX_WIRE_VERSION, + LATEST_WIRE_VERSION, new BsonDocument('insert', new BsonString('coll')), new SplittablePayload(INSERT, [new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))] .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) } ), ], [ - THREE_DOT_SIX_WIRE_VERSION, + LATEST_WIRE_VERSION, new BsonDocument('insert', new BsonString('coll')).append('documents', new BsonArray([new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))])), null @@ -176,7 +174,7 @@ class CommandMessageSpecification extends Specification { def 'should respect the max message size'() { given: def maxMessageSize = 1024 - def messageSettings = MessageSettings.builder().maxMessageSize(maxMessageSize).maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxMessageSize(maxMessageSize).maxWireVersion(LATEST_WIRE_VERSION).build() def insertCommand = new BsonDocument('insert', new BsonString(namespace.collectionName)) def payload = new SplittablePayload(INSERT, [new BsonDocument('_id', new BsonInt32(1)).append('a', new BsonBinary(new byte[913])), new BsonDocument('_id', new 
BsonInt32(2)).append('b', new BsonBinary(new byte[441])), @@ -262,7 +260,7 @@ class CommandMessageSpecification extends Specification { def 'should respect the max batch count'() { given: - def messageSettings = MessageSettings.builder().maxBatchCount(2).maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxBatchCount(2).maxWireVersion(LATEST_WIRE_VERSION).build() def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900])), new BsonDocument('b', new BsonBinary(new byte[450])), new BsonDocument('c', new BsonBinary(new byte[450]))] @@ -309,7 +307,7 @@ class CommandMessageSpecification extends Specification { def 'should throw if payload document bigger than max document size'() { given: def messageSettings = MessageSettings.builder().maxDocumentSize(900) - .maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + .maxWireVersion(LATEST_WIRE_VERSION).build() def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900]))] .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, @@ -326,25 +324,6 @@ class CommandMessageSpecification extends Specification { thrown(BsonMaximumSizeExceededException) } - def 'should throw if wire version does not support transactions'() { - given: - def messageSettings = MessageSettings.builder().maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() - def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonInt32(1))]) - def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) - def output = new BasicOutputBuffer() - def sessionContext = Stub(SessionContext) { - getReadConcern() >> ReadConcern.DEFAULT - hasActiveTransaction() >> true - } - - when: - message.encode(output, sessionContext) - - then: - thrown(MongoClientException) - } - def 'should throw if wire version and sharded cluster does not support transactions'() { given: def messageSettings = MessageSettings.builder().serverType(ServerType.SHARD_ROUTER) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy index ba5625999d1..c0cd580e02e 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy @@ -67,7 +67,7 @@ import static com.mongodb.connection.ConnectionDescription.getDefaultMaxWriteBat import static com.mongodb.connection.ServerDescription.getDefaultMaxDocumentSize import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER -import static com.mongodb.internal.operation.ServerVersionHelper.THREE_DOT_SIX_WIRE_VERSION +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION import static java.util.concurrent.TimeUnit.NANOSECONDS import static java.util.concurrent.TimeUnit.SECONDS @@ -81,7 +81,7 @@ class InternalStreamConnectionSpecification extends Specification { def serverAddress = new ServerAddress() def connectionId = new ConnectionId(SERVER_ID, 
1, 1) def commandListener = new TestCommandListener() - def messageSettings = MessageSettings.builder().maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def connectionDescription = new ConnectionDescription(connectionId, 3, ServerType.STANDALONE, getDefaultMaxWriteBatchSize(), getDefaultMaxDocumentSize(), getDefaultMaxMessageSize(), []) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy index 8ff260995dd..9c3fb0d91db 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy @@ -41,7 +41,7 @@ import spock.lang.Specification import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterConnectionMode.SINGLE -import static com.mongodb.internal.operation.ServerVersionHelper.THREE_DOT_SIX_WIRE_VERSION +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION class LoggingCommandEventSenderSpecification extends Specification { @@ -49,7 +49,7 @@ class LoggingCommandEventSenderSpecification extends Specification { given: def connectionDescription = new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress())) def namespace = new MongoNamespace('test.driver') - def messageSettings = MessageSettings.builder().maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def commandListener = new TestCommandListener() def commandDocument = new BsonDocument('ping', new BsonInt32(1)) def replyDocument = new BsonDocument('ok', new BsonInt32(1)) @@ -95,7 +95,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def connectionDescription = new ConnectionDescription(serverId) .withConnectionId(new ConnectionId(serverId, 42, 1000)) def namespace = new MongoNamespace('test.driver') - def messageSettings = MessageSettings.builder().maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def commandDocument = new BsonDocument('ping', new BsonInt32(1)) def replyDocument = new BsonDocument('ok', new BsonInt32(42)) def failureException = new MongoInternalException('failure!') @@ -153,7 +153,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def connectionDescription = new ConnectionDescription(serverId) .withConnectionId(new ConnectionId(serverId, 42, 1000)) def namespace = new MongoNamespace('test.driver') - def messageSettings = MessageSettings.builder().maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def commandDocument = new BsonDocument('fake', new BsonBinary(new byte[2048])) def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, SINGLE, null) @@ -186,7 +186,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def connectionDescription = new ConnectionDescription(serverId) .withConnectionId(new ConnectionId(serverId, 42, 1000)) def namespace = new MongoNamespace('test.driver') - def 
messageSettings = MessageSettings.builder().maxWireVersion(THREE_DOT_SIX_WIRE_VERSION).build() + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def commandDocument = new BsonDocument('createUser', new BsonString('private')) def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, SINGLE, null) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java index e8003f692a9..8e99c89c20d 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java @@ -44,7 +44,7 @@ import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException; import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk; -import static com.mongodb.internal.operation.ServerVersionHelper.THREE_DOT_SIX_WIRE_VERSION; +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION; class TestInternalConnection implements InternalConnection { @@ -66,7 +66,7 @@ private static class Interaction { } TestInternalConnection(final ServerId serverId, final ServerType serverType) { - this.description = new ConnectionDescription(new ConnectionId(serverId), THREE_DOT_SIX_WIRE_VERSION, serverType, 0, 0, 0, + this.description = new ConnectionDescription(new ConnectionId(serverId), LATEST_WIRE_VERSION, serverType, 0, 0, 0, Collections.emptyList()); this.bufferProvider = new SimpleBufferProvider(); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java index cf829f919c5..e2ea7939880 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java @@ -37,7 +37,7 @@ import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply; import static com.mongodb.internal.connection.MessageHelper.getApiVersionField; import static com.mongodb.internal.connection.MessageHelper.getDbField; -import static com.mongodb.internal.operation.ServerVersionHelper.THREE_DOT_SIX_WIRE_VERSION; +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION; import static org.junit.Assert.assertEquals; public class X509AuthenticatorNoUserNameTest { @@ -48,7 +48,7 @@ public class X509AuthenticatorNoUserNameTest { public void before() { connection = new TestInternalConnection(new ServerId(new ClusterId(), new ServerAddress("localhost", 27017))); connectionDescriptionThreeSix = new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), - THREE_DOT_SIX_WIRE_VERSION, ServerType.STANDALONE, 1000, 16000, + LATEST_WIRE_VERSION, ServerType.STANDALONE, 1000, 16000, 48000, Collections.emptyList()); } diff --git a/driver-legacy/src/main/com/mongodb/MapReduceCommand.java b/driver-legacy/src/main/com/mongodb/MapReduceCommand.java index 0fc4278cacc..d812d6a12af 100644 --- a/driver-legacy/src/main/com/mongodb/MapReduceCommand.java +++ b/driver-legacy/src/main/com/mongodb/MapReduceCommand.java @@ -222,7 +222,7 @@ public long getMaxTime(final TimeUnit timeUnit) { /** * Sets the max execution time 
for this command, in the given time unit. * - * @param maxTime the maximum execution time. A non-zero value requires a server version >= 2.6 + * @param maxTime the maximum execution time. * @param timeUnit the time unit that maxTime is specified in * @since 2.12.0 */ diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java index c9ffdf14a0e..5ca589c54df 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java @@ -28,7 +28,6 @@ import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isSharded; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.connection.ClusterType.REPLICA_SET; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.SECONDS; @@ -104,7 +103,6 @@ public List prepareData() { @Test public void testExplain() { - assumeTrue(serverVersionAtLeast(3, 6)); List pipeline = new ArrayList<>(prepareData()); CommandResult out = collection.explainAggregate(pipeline, AggregationOptions.builder().build()); assertTrue(out.keySet().iterator().hasNext()); @@ -133,7 +131,6 @@ public void testMaxTime() { @Test public void testWriteConcern() { assumeThat(isDiscoverableReplicaSet(), is(true)); - assumeTrue(serverVersionAtLeast(3, 4)); DBCollection collection = database.getCollection("testWriteConcern"); collection.setWriteConcern(new WriteConcern(5)); try { diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java index f04f10efd04..6c4f622507b 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java @@ -28,7 +28,6 @@ import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.isSharded; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; @@ -394,13 +393,7 @@ public void testSettingACommentInsertsCommentIntoProfileCollectionWhenProfilingI assertEquals(1, profileCollection.count()); DBObject profileDocument = profileCollection.findOne(); - if (serverVersionAtLeast(3, 6)) { - assertEquals(expectedComment, ((DBObject) profileDocument.get("command")).get("comment")); - } else if (serverVersionAtLeast(3, 2)) { - assertEquals(expectedComment, ((DBObject) profileDocument.get("query")).get("comment")); - } else { - assertEquals(expectedComment, ((DBObject) profileDocument.get("query")).get("$comment")); - } + assertEquals(expectedComment, ((DBObject) profileDocument.get("command")).get("comment")); } finally { database.command(new BasicDBObject("profile", 0)); profileCollection.drop(); diff --git a/driver-legacy/src/test/functional/com/mongodb/DBTest.java b/driver-legacy/src/test/functional/com/mongodb/DBTest.java index 8b2f8f59d90..4ce9b3f760b 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/DBTest.java @@ -36,7 +36,6 @@ import static com.mongodb.ClusterFixture.getBinding; import static 
com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isSharded; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.DBObjectMatchers.hasFields; import static com.mongodb.DBObjectMatchers.hasSubdocument; import static com.mongodb.Fixture.getDefaultDatabaseName; @@ -162,7 +161,6 @@ public void shouldThrowErrorIfCreatingACappedCollectionWithANegativeSize() { @Test public void shouldCreateCollectionWithTheSetCollation() { - assumeThat(serverVersionAtLeast(3, 4), is(true)); // Given collection.drop(); Collation collation = Collation.builder() diff --git a/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java b/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java index 74c8d5d15fc..f10a2fd6e93 100644 --- a/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java @@ -29,7 +29,6 @@ import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isSharded; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.ClusterFixture.serverVersionLessThan; import static com.mongodb.DBObjectMatchers.hasFields; import static com.mongodb.DBObjectMatchers.hasSubdocument; @@ -48,7 +47,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeThat; -import static org.junit.Assume.assumeTrue; @SuppressWarnings("deprecation") public class MapReduceTest extends DatabaseTestCase { @@ -108,7 +106,6 @@ public void testMapReduceExecutionTimeout() { @Test public void testWriteConcern() { assumeThat(isDiscoverableReplicaSet(), is(true)); - assumeTrue(serverVersionAtLeast(3, 4)); DBCollection collection = database.getCollection("testWriteConcernForMapReduce"); collection.insert(new BasicDBObject("x", new String[]{"a", "b"}).append("s", 1)); collection.setWriteConcern(new WriteConcern(5)); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java index 7bd08753665..ce15fe3d1e4 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java @@ -34,13 +34,11 @@ import static com.mongodb.reactivestreams.client.Fixture.drop; import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabase; import static com.mongodb.reactivestreams.client.Fixture.getMongoClient; -import static com.mongodb.reactivestreams.client.Fixture.serverVersionAtLeast; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static java.util.stream.IntStream.rangeClosed; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.junit.jupiter.api.DynamicTest.dynamicTest; public class BatchCursorPublisherErrorTest { @@ -49,7 +47,6 @@ public class BatchCursorPublisherErrorTest { @BeforeEach public void setup() { - assumeTrue(serverVersionAtLeast(3, 6)); collection = getDefaultDatabase().getCollection("changeStreamsCancellationTest"); 
Mono.from(collection.insertMany(rangeClosed(1, 11) .boxed() diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java index e7e266a3d92..a41c818ceea 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java @@ -27,7 +27,6 @@ import static com.mongodb.reactivestreams.client.Fixture.drop; import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabase; import static com.mongodb.reactivestreams.client.Fixture.isReplicaSet; -import static com.mongodb.reactivestreams.client.Fixture.serverVersionAtLeast; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assumptions.assumeTrue; @@ -37,7 +36,7 @@ public class ChangeStreamsCancellationTest { @BeforeEach public void setup() { - assumeTrue(isReplicaSet() && serverVersionAtLeast(3, 6)); + assumeTrue(isReplicaSet()); collection = getDefaultDatabase().getCollection("changeStreamsCancellationTest"); } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java index 15b1bc7f5cf..e3ff5921ad2 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java @@ -30,12 +30,10 @@ import java.util.List; import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.CommandMonitoringTestHelper.assertEventsEquality; import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName; import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString; import static java.util.Collections.singletonList; -import static org.junit.Assume.assumeTrue; public class ReadConcernTest { private TestCommandListener commandListener; @@ -43,7 +41,6 @@ public class ReadConcernTest { @Before public void setUp() { - assumeTrue(canRunTests()); commandListener = new TestCommandListener(); mongoClient = MongoClients.create(getMongoClientBuilderFromConnectionString() .addCommandListener(commandListener) @@ -74,8 +71,4 @@ public void shouldIncludeReadConcernInCommand() throws InterruptedException { assertEventsEquality(singletonList(new CommandStartedEvent(null, 1, 1, null, getDefaultDatabaseName(), "find", commandDocument)), events); } - - private boolean canRunTests() { - return serverVersionAtLeast(3, 2); - } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java index eb2b73e0c7e..fcfd3160515 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java @@ -33,7 +33,6 @@ import static com.mongodb.ClusterFixture.getServerStatus; import static 
com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isSharded; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.ClusterFixture.serverVersionLessThan; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -113,6 +112,6 @@ private boolean canRunTests() { return ((isSharded() || isDiscoverableReplicaSet()) && storageEngine != null && storageEngine.get("name").equals("mmapv1") - && serverVersionAtLeast(3, 6) && serverVersionLessThan(4, 2)); + && serverVersionLessThan(4, 2)); } } diff --git a/driver-sync/src/examples/documentation/DocumentationSamples.java b/driver-sync/src/examples/documentation/DocumentationSamples.java index 22f9793806e..659507807ee 100644 --- a/driver-sync/src/examples/documentation/DocumentationSamples.java +++ b/driver-sync/src/examples/documentation/DocumentationSamples.java @@ -42,7 +42,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getMongoClient; import static com.mongodb.client.model.Accumulators.sum; @@ -507,8 +506,6 @@ public void testProjectingFields() { @Test public void testAggregate() { - assumeTrue(serverVersionAtLeast(3, 6)); - MongoCollection salesCollection = database.getCollection("sales"); // Start Aggregation Example 1 @@ -663,7 +660,7 @@ public void testDeletions() { @Test public void testWatch() throws InterruptedException { - assumeTrue(isDiscoverableReplicaSet() && serverVersionAtLeast(3, 6)); + assumeTrue(isDiscoverableReplicaSet()); MongoCollection inventory = collection; AtomicBoolean stop = new AtomicBoolean(false); @@ -725,9 +722,6 @@ public void testRunCommand() { @Test public void testCreateIndexes() { - - assumeTrue(serverVersionAtLeast(3, 2)); - // Start Index Example 1 collection.createIndex(Indexes.ascending("score")); // End Index Example 1 diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java index 18b8d4dc520..7db4a079a5e 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java @@ -54,8 +54,6 @@ public void tearDown() { @Test public void testExplainOfFind() { - assumeTrue(serverVersionAtLeast(3, 0)); - MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) .getCollection("explainTest", BsonDocument.class); collection.drop(); @@ -147,7 +145,6 @@ private static BsonDocument getAggregateExplainDocument(final BsonDocument rootA public void testExplainOfAggregateWithOldResponseStructure() { // Aggregate explain is supported on earlier versions, but the structure of the response on which we're asserting in this test // changed radically in 4.2. 
So here we just assert that we got a non-error respinse - assumeTrue(serverVersionAtLeast(3, 6)); assumeTrue(serverVersionLessThan(4, 2)); MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java index 8883c1b643d..db3cb497543 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java @@ -79,8 +79,6 @@ public static void afterAll() { // Test 13 from #13-existing-sessions-are-not-checked-into-a-cleared-pool-after-forking @Test public void shouldCreateServerSessionOnlyAfterConnectionCheckout() throws InterruptedException { - assumeTrue(serverVersionAtLeast(3, 6)); - Set lsidSet = ConcurrentHashMap.newKeySet(); MongoCollection collection; try (MongoClient client = getMongoClient( diff --git a/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java index 2be283855eb..adbc442c4f9 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java @@ -455,7 +455,7 @@ private void disableFailPoint() { } private boolean canRunTests() { - return isDiscoverableReplicaSet() && serverVersionAtLeast(3, 6); + return isDiscoverableReplicaSet(); } private AggregateResponseBatchCursor getBatchCursor(final MongoChangeStreamCursor> cursor) diff --git a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java index 5694759a845..b8d94cfe067 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java @@ -82,8 +82,6 @@ public void testWriteConcernErrInfoIsPropagated() { */ @Test public void testWriteErrorDetailsIsPropagated() { - assumeTrue(serverVersionAtLeast(3, 2)); - getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions() .validationOptions(new ValidationOptions() diff --git a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java index 6521fa67010..4ab1d179611 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java @@ -31,11 +31,9 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.CommandMonitoringTestHelper.assertEventsEquality; import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; -import static org.junit.Assume.assumeTrue; public class ReadConcernTest { private MongoClient mongoClient; @@ -43,8 +41,6 @@ public class ReadConcernTest { @Before public void setUp() { - assumeTrue(canRunTests()); - commandListener = new TestCommandListener(); mongoClient = MongoClients.create(getMongoClientSettingsBuilder() .addCommandListener(commandListener) @@ -73,8 +69,4 @@ public void shouldIncludeReadConcernInCommand() { assertEventsEquality(Arrays.asList(new CommandStartedEvent(null, 1, 1, null, 
getDefaultDatabaseName(), "find", commandDocument)), events); } - - private boolean canRunTests() { - return serverVersionAtLeast(3, 2); - } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java index e449fc628af..c4da13c1e81 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java @@ -261,6 +261,6 @@ private boolean canRunTests() { return ((isSharded() || isDiscoverableReplicaSet()) && storageEngine != null && storageEngine.get("name").equals("mmapv1") - && serverVersionAtLeast(3, 6) && serverVersionLessThan(4, 2)); + && serverVersionLessThan(4, 2)); } } From 1e79c5e75b17f2b8ffb86c9d59377c449554bb14 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Mon, 6 May 2024 21:59:39 -0400 Subject: [PATCH 03/90] Disable failing unified CRUD tests (#1381) JAVA-5458 --- .../com/mongodb/client/unified/UnifiedCrudTest.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java index dae57b323a6..410c6b9e0e9 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java @@ -25,6 +25,8 @@ import java.net.URISyntaxException; import java.util.Collection; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static org.junit.Assume.assumeFalse; public class UnifiedCrudTest extends UnifiedSyncTest { @@ -49,6 +51,10 @@ public static void customSkips(final String fileDescription, final String testDe assumeFalse(testDescription.equals("Unacknowledged findOneAndUpdate with hint document on 4.4+ server")); assumeFalse(testDescription.equals("Unacknowledged findOneAndDelete with hint string on 4.4+ server")); assumeFalse(testDescription.equals("Unacknowledged findOneAndDelete with hint document on 4.4+ server")); + if (isDiscoverableReplicaSet() && serverVersionAtLeast(8, 0)) { + assumeFalse(testDescription.equals("Aggregate with $out includes read preference for 5.0+ server")); + assumeFalse(testDescription.equals("Database-level aggregate with $out includes read preference for 5.0+ server")); + } } @Parameterized.Parameters(name = "{0}: {1}") From 58946d50eca40741e4cce9dcc10730fdba82b9be Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Tue, 7 May 2024 08:48:59 -0600 Subject: [PATCH 04/90] ALLOWED_HOSTS validation, 1 minute machine timeout (#1380) JAVA-5350 --- .../connection/OidcAuthenticator.java | 18 ++++++++--- .../auth/mongodb-oidc-no-retry.json | 3 +- .../OidcAuthenticationProseTests.java | 31 ++++++++++++++++--- 3 files changed, 42 insertions(+), 10 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java index af26abbf87f..164d93aac9c 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java @@ -85,7 +85,8 @@ public final class OidcAuthenticator extends SaslAuthenticator { private static final List ALLOWS_USERNAME = Arrays.asList( AZURE_ENVIRONMENT); - private static 
final Duration CALLBACK_TIMEOUT = Duration.ofMinutes(5); + private static final Duration CALLBACK_TIMEOUT = Duration.ofMinutes(1); + private static final Duration HUMAN_CALLBACK_TIMEOUT = Duration.ofMinutes(5); public static final String OIDC_TOKEN_FILE = "OIDC_TOKEN_FILE"; @@ -112,6 +113,10 @@ public OidcAuthenticator(final MongoCredentialWithCache credential, } } + private Duration getCallbackTimeout() { + return isHumanCallback() ? HUMAN_CALLBACK_TIMEOUT : CALLBACK_TIMEOUT; + } + @Override public String getMechanismName() { return MONGODB_OIDC.getMechanismName(); @@ -306,7 +311,7 @@ private byte[] evaluate(final byte[] challenge) { // Invoke Callback using cached Refresh Token fallbackState = FallbackState.PHASE_2_REFRESH_CALLBACK_TOKEN; OidcCallbackResult result = requestCallback.onRequest(new OidcCallbackContextImpl( - CALLBACK_TIMEOUT, cachedIdpInfo, cachedRefreshToken, userName)); + getCallbackTimeout(), cachedIdpInfo, cachedRefreshToken, userName)); jwt[0] = populateCacheWithCallbackResultAndPrepareJwt(cachedIdpInfo, result); } else { // cache is empty @@ -315,7 +320,7 @@ private byte[] evaluate(final byte[] challenge) { // no principal request fallbackState = FallbackState.PHASE_3B_CALLBACK_TOKEN; OidcCallbackResult result = requestCallback.onRequest(new OidcCallbackContextImpl( - CALLBACK_TIMEOUT, userName)); + getCallbackTimeout(), userName)); jwt[0] = populateCacheWithCallbackResultAndPrepareJwt(null, result); if (result.getRefreshToken() != null) { throw new MongoConfigurationException( @@ -345,7 +350,7 @@ private byte[] evaluate(final byte[] challenge) { // there is no cached refresh token fallbackState = FallbackState.PHASE_3B_CALLBACK_TOKEN; OidcCallbackResult result = requestCallback.onRequest(new OidcCallbackContextImpl( - CALLBACK_TIMEOUT, idpInfo, null, userName)); + getCallbackTimeout(), idpInfo, null, userName)); jwt[0] = populateCacheWithCallbackResultAndPrepareJwt(idpInfo, result); } } @@ -606,6 +611,11 @@ public static void validateBeforeUse(final MongoCredential credential) { Object environmentName = credential.getMechanismProperty(ENVIRONMENT_KEY, null); Object machineCallback = credential.getMechanismProperty(OIDC_CALLBACK_KEY, null); Object humanCallback = credential.getMechanismProperty(OIDC_HUMAN_CALLBACK_KEY, null); + boolean allowedHostsIsSet = credential.getMechanismProperty(ALLOWED_HOSTS_KEY, null) != null; + if (humanCallback == null && allowedHostsIsSet) { + throw new IllegalArgumentException(ALLOWED_HOSTS_KEY + " must be specified only when " + + OIDC_HUMAN_CALLBACK_KEY + " is specified"); + } if (environmentName == null) { // callback if (machineCallback == null && humanCallback == null) { diff --git a/driver-core/src/test/resources/unified-test-format/auth/mongodb-oidc-no-retry.json b/driver-core/src/test/resources/unified-test-format/auth/mongodb-oidc-no-retry.json index 83065f492ae..eac17137f2f 100644 --- a/driver-core/src/test/resources/unified-test-format/auth/mongodb-oidc-no-retry.json +++ b/driver-core/src/test/resources/unified-test-format/auth/mongodb-oidc-no-retry.json @@ -5,7 +5,8 @@ { "minServerVersion": "7.0", "auth": true, - "authMechanism": "MONGODB-OIDC" + "authMechanism": "MONGODB-OIDC", + "serverless": "forbid" } ], "createEntities": [ diff --git a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java index 9915f6a6a34..01d530e9e20 100644 --- 
a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java +++ b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java @@ -96,15 +96,15 @@ private void assumeTestEnvironment() { } protected static String getOidcUri() { - return getenv("MONGODB_URI_SINGLE"); + return assertNotNull(getenv("MONGODB_URI_SINGLE")); } private static String getOidcUriMulti() { - return getenv("MONGODB_URI_MULTI"); + return assertNotNull(getenv("MONGODB_URI_MULTI")); } private static String getOidcEnv() { - return getenv("OIDC_ENV"); + return assertNotNull(getenv("OIDC_ENV")); } private static void assumeAzure() { @@ -179,13 +179,13 @@ public void test1p2CallbackCalledOnceForMultipleConnections() { @Test public void test2p1ValidCallbackInputs() { - Duration expectedSeconds = Duration.ofMinutes(5); + Duration expectedTimeoutDuration = Duration.ofMinutes(1); TestCallback callback1 = createCallback(); // #. Verify that the request callback was called with the appropriate // inputs, including the timeout parameter if possible. OidcCallback callback2 = (context) -> { - assertEquals(expectedSeconds, context.getTimeout()); + assertEquals(expectedTimeoutDuration, context.getTimeout()); return callback1.onRequest(context); }; MongoClientSettings clientSettings = createSettings(callback2); @@ -232,6 +232,27 @@ public void test2p4InvalidClientConfigurationWithCallback() { () -> performFind(settings)); } + @Test + public void test2p5InvalidAllowedHosts() { + String uri = "mongodb://localhost/?authMechanism=MONGODB-OIDC&&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:123"; + ConnectionString cs = new ConnectionString(uri); + MongoCredential credential = assertNotNull(cs.getCredential()) + .withMechanismProperty("ALLOWED_HOSTS", Collections.emptyList()); + MongoClientSettings settings = MongoClientSettings.builder() + .applicationName(appName) + .applyConnectionString(cs) + .retryReads(false) + .credential(credential) + .build(); + assertCause(IllegalArgumentException.class, + "ALLOWED_HOSTS must not be specified only when OIDC_HUMAN_CALLBACK is specified", + () -> { + try (MongoClient mongoClient = createMongoClient(settings)) { + performFind(mongoClient); + } + }); + } + @Test public void test3p1AuthFailsWithCachedToken() throws ExecutionException, InterruptedException, NoSuchFieldException, IllegalAccessException { TestCallback callbackWrapped = createCallback(); From 70598ff96df7569f43c3a52fef83d3fc523c6e72 Mon Sep 17 00:00:00 2001 From: ashni <105304831+ashni-mongodb@users.noreply.github.com> Date: Tue, 7 May 2024 22:15:25 -0400 Subject: [PATCH 05/90] Fixing broken link to Community Forums (#1386) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 32aecb53c28..88827db052f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,7 +31,7 @@ Talk To Us ---------- If you have questions about using the driver, please reach out on the -[MongoDB Community Forums](https://developer.mongodb.com/community/forums/tags/c/drivers-odms-connectors/7/java-driver). +[MongoDB Community Forums](https://www.mongodb.com/community/forums/tags/c/data/drivers/7/java). Thanks to all the people who have already contributed! 
From 82f69bf58556d6db4449307d19b33403f92cd0d6 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Wed, 8 May 2024 11:38:13 -0400 Subject: [PATCH 06/90] Add MongoDB 8.0 to testing matrix (#1385) JAVA-5456 --- .evergreen/.evg.yml | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 58369f23a59..489d08870d6 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -1763,6 +1763,13 @@ axes: # Multiple mongos instances can be specified in the connection string # for this version. SAFE_FOR_MULTI_MONGOS: true + - id: "8.0" + display_name: "8.0" + variables: + VERSION: "8.0" + # Multiple mongos instances can be specified in the connection string + # for this version. + SAFE_FOR_MULTI_MONGOS: true - id: "7.0" display_name: "7.0" variables: @@ -2211,7 +2218,7 @@ buildvariants: - matrix_name: "tests-zstd-compression" matrix_spec: { compressor : "zstd", auth: "noauth", ssl: "nossl", jdk: "jdk8", - version: ["4.2", "4.4", "5.0", "6.0", "7.0", "latest"], + version: ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest"], topology: "standalone", os: "linux" } display_name: "${version} ${compressor} ${topology} ${auth} ${ssl} ${jdk} ${os} " tags: ["tests-variant"] @@ -2219,7 +2226,7 @@ buildvariants: - name: "test" - matrix_name: "tests-jdk8-unsecure" - matrix_spec: { auth: "noauth", ssl: "nossl", jdk: "jdk8", version: ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "latest"], + matrix_spec: { auth: "noauth", ssl: "nossl", jdk: "jdk8", version: ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest"], topology: "*", os: "linux" } display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " tags: ["tests-variant"] @@ -2228,7 +2235,7 @@ buildvariants: - matrix_name: "tests-jdk-secure" matrix_spec: { auth: "auth", ssl: "ssl", jdk: [ "jdk8", "jdk17", "jdk21"], - version: ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "latest" ], + version: ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], topology: "*", os: "linux" } display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " tags: ["tests-variant"] @@ -2243,7 +2250,7 @@ buildvariants: - name: "test" - matrix_name: "tests-require-api-version" - matrix_spec: { api-version: "required", auth: "auth", ssl: "nossl", jdk: ["jdk21"], version: ["5.0", "6.0", "7.0", "latest"], + matrix_spec: { api-version: "required", auth: "auth", ssl: "nossl", jdk: ["jdk21"], version: ["5.0", "6.0", "7.0", "8.0", "latest"], topology: "standalone", os: "linux" } display_name: "${version} ${topology} ${api-version} " tags: ["tests-variant"] @@ -2251,7 +2258,7 @@ buildvariants: - name: "test" - matrix_name: "tests-load-balancer-secure" - matrix_spec: { auth: "auth", ssl: "ssl", jdk: ["jdk21"], version: ["5.0", "6.0", "7.0", "latest"], topology: "sharded-cluster", + matrix_spec: { auth: "auth", ssl: "ssl", jdk: ["jdk21"], version: ["5.0", "6.0", "7.0", "8.0", "latest"], topology: "sharded-cluster", os: "ubuntu" } display_name: "Load Balancer ${version} ${auth} ${ssl} ${jdk} ${os}" tasks: @@ -2359,7 +2366,7 @@ buildvariants: batchtime: 20160 # 14 days - matrix_name: "aws-auth-test" - matrix_spec: { ssl: "nossl", jdk: ["jdk8", "jdk17", "jdk21"], version: ["4.4", "5.0", "6.0", "7.0", "latest"], os: "ubuntu", + matrix_spec: { ssl: "nossl", jdk: ["jdk8", "jdk17", "jdk21"], version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"], os: "ubuntu", aws-credential-provider: "*" } display_name: "MONGODB-AWS Basic Auth test ${version} ${jdk} ${aws-credential-provider}" run_on: ubuntu2004-small @@ 
-2378,14 +2385,15 @@ buildvariants: - name: "aws-auth-test-with-web-identity-credentials" - matrix_name: "accept-api-version-2-test" - matrix_spec: { ssl: "nossl", auth: "noauth", jdk: "jdk21", version: ["5.0", "6.0", "7.0", "latest"], topology: "standalone", os: "linux" } + matrix_spec: { ssl: "nossl", auth: "noauth", jdk: "jdk21", version: ["5.0", "6.0", "7.0", "8.0", "latest"], topology: "standalone", + os: "linux" } display_name: "Accept API Version 2 ${version}" run_on: ubuntu2004-small tasks: - name: "accept-api-version-2-test" - matrix_name: "ocsp-test" - matrix_spec: { auth: "noauth", ssl: "ssl", jdk: "jdk21", version: ["4.4", "5.0", "6.0", "7.0", "latest"], os: "ubuntu" } + matrix_spec: { auth: "noauth", ssl: "ssl", jdk: "jdk21", version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"], os: "ubuntu" } display_name: "OCSP test ${version} ${os}" tasks: - name: ".ocsp" @@ -2460,7 +2468,7 @@ buildvariants: - name: ".csfle-aws-from-environment" - matrix_name: "csfle-tests-with-mongocryptd" - matrix_spec: { os: "linux", version: [ "4.2", "4.4", "5.0", "6.0", "7.0", "latest" ], topology: ["replicaset"] } + matrix_spec: { os: "linux", version: [ "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], topology: ["replicaset"] } display_name: "CSFLE with mongocryptd: ${version}" tasks: - name: "csfle-tests-with-mongocryptd" From 3d36ccf158bff08b0ff90bcc5d22bb95dda6deb1 Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Thu, 9 May 2024 08:52:54 -0600 Subject: [PATCH 07/90] Allow empty commits for OIDC evergreen script (#1384) JAVA-5450 --- .evergreen/.evg.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 489d08870d6..37b67c6e1e5 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -992,7 +992,7 @@ tasks: ${PREPARE_SHELL} cd src git add . - git commit -m "add files" + git commit --allow-empty -m "add files" # uncompressed tar used to allow appending .git folder export AZUREOIDC_DRIVERS_TAR_FILE=/tmp/mongo-java-driver.tar git archive -o $AZUREOIDC_DRIVERS_TAR_FILE HEAD @@ -1010,7 +1010,7 @@ tasks: ${PREPARE_SHELL} cd src git add . - git commit -m "add files" + git commit --allow-empty -m "add files" # uncompressed tar used to allow appending .git folder export GCPOIDC_DRIVERS_TAR_FILE=/tmp/mongo-java-driver.tar git archive -o $GCPOIDC_DRIVERS_TAR_FILE HEAD From 5c37b88313afef00a8fb4781dd84b1a383661373 Mon Sep 17 00:00:00 2001 From: ashni <105304831+ashni-mongodb@users.noreply.github.com> Date: Mon, 13 May 2024 10:55:45 -0400 Subject: [PATCH 08/90] Update README.md to use 5.x in Versioning section --- README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 95e894d1585..7d6de4dd0dc 100644 --- a/README.md +++ b/README.md @@ -8,13 +8,15 @@ Reference and API documentation for the Java driver is available [here](https:// Reference and API documentation for the Kotlin driver is available [here](https://www.mongodb.com/docs/drivers/kotlin/coroutine/current/). +Reference and API documentation for the Scala driver is available [here](https://www.mongodb.com/docs/languages/scala/scala-driver/current/). + ## Tutorials / Training For tutorials on how to use the MongoDB JVM Drivers, please reference [MongoDB University](https://learn.mongodb.com/). Additional tutorials, videos, and code examples using both the Java Driver and the Kotlin Driver can also be found in the [MongoDB Developer Center](https://www.mongodb.com/developer/). 
## Support / Feedback -For issues with, questions about, or feedback for the MongoDB Java and Kotlin drivers, please look into +For issues with, questions about, or feedback for the MongoDB Java, Kotlin, and Scala drivers, please look into our [support channels](https://www.mongodb.com/docs/manual/support/). Please do not email any of the driver developers directly with issues or questions - you're more likely to get an answer on the [MongoDB Community Forums](https://community.mongodb.com/tags/c/drivers-odms-connectors/7/java-driver) or [StackOverflow](https://stackoverflow.com/questions/tagged/mongodb+java). @@ -26,7 +28,7 @@ any connectivity-related exceptions and post those as well. ## Bugs / Feature Requests -Think you’ve found a bug in the Java or Kotlin drivers? Want to see a new feature in the drivers? Please open a +Think you’ve found a bug in the Java, Kotlin, or Scala drivers? Want to see a new feature in the drivers? Please open a case in our issue management tool, JIRA: - [Create an account and login](https://jira.mongodb.org). @@ -40,16 +42,16 @@ MongoDB project, please report it according to the [instructions here](https://w ## Versioning -Major increments (such as 3.x -> 4.x) will occur when break changes are being made to the public API. All methods and +Major increments (such as 4.x -> 5.x) will occur when breaking changes are being made to the public API. All methods and classes removed in a major release will have been deprecated in a prior release of the previous major release branch, and/or otherwise called out in the release notes. -Minor 4.x increments (such as 4.1, 4.2, etc) will occur when non-trivial new functionality is added or significant enhancements or bug +Minor 5.x increments (such as 5.1, 5.2, etc) will occur when non-trivial new functionality is added or significant enhancements or bug fixes occur that may have behavioral changes that may affect some edge cases (such as dependence on behavior resulting from a bug). An example of an enhancement is a method or class added to support new functionality added to the MongoDB server. Minor releases will almost always be binary compatible with prior minor releases from the same major release branch, except as noted below. -Patch 4.x.y increments (such as 4.0.0 -> 4.0.1, 4.1.1 -> 4.1.2, etc) will occur for bug fixes only and will always be binary compatible +Patch 5.x.y increments (such as 5.0.0 -> 5.0.1, 5.1.1 -> 5.1.2, etc) will occur for bug fixes only and will always be binary compatible with prior patch releases of the same minor release branch. 
#### @Beta From 28a28f748bc113392bca5a09912b65ec64344be3 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Tue, 14 May 2024 10:09:10 -0400 Subject: [PATCH 09/90] Add BatchCursor interceptor in reactive tests (#1390) JAVA-5356 --------- Co-authored-by: slav.babanin --- .../client/syncadapter/SyncMongoClient.java | 23 ++++++ .../client/syncadapter/SyncMongoCursor.java | 74 +++++++++++++++++++ .../client/unified/ChangeStreamsTest.java | 20 ++++- 3 files changed, 113 insertions(+), 4 deletions(-) diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java index 170b33a3398..28d5adbdfc7 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java @@ -24,6 +24,7 @@ import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; import com.mongodb.connection.ClusterDescription; +import com.mongodb.reactivestreams.client.internal.BatchCursor; import org.bson.BsonDocument; import org.bson.Document; import org.bson.conversions.Bson; @@ -41,6 +42,7 @@ public class SyncMongoClient implements MongoClient { private static long sleepAfterCursorCloseMS; private static long sleepAfterSessionCloseMS; + private static boolean waitForBatchCursorCreation; /** * Unfortunately this is the only way to wait for a query to be initiated, since Reactive Streams is asynchronous @@ -88,6 +90,27 @@ public static void enableSleepAfterSessionClose(final long sleepMS) { sleepAfterSessionCloseMS = sleepMS; } + /** + * Enables behavior for waiting until a reactive {@link BatchCursor} is created. + *
<p>
+ * When enabled, {@link SyncMongoCursor} allows intercepting the result of the cursor creation process. + * If the creation fails, the resulting exception will be propagated; if successful, the + * process will proceed to issue getMore commands. + *
* <p>
+ * NOTE: Do not enable when multiple cursors are being iterated concurrently. + */ + public static void enableWaitForBatchCursorCreation() { + waitForBatchCursorCreation = true; + } + + public static boolean isWaitForBatchCursorCreationEnabled() { + return waitForBatchCursorCreation; + } + + public static void disableWaitForBatchCursorCreation() { + waitForBatchCursorCreation = false; + } + public static void disableSleep() { sleepAfterCursorOpenMS = 0; sleepAfterCursorCloseMS = 0; diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java index c21cbc0e9f0..63485fba132 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java @@ -21,26 +21,36 @@ import com.mongodb.ServerCursor; import com.mongodb.client.MongoCursor; import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.internal.BatchCursor; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +import reactor.core.CoreSubscriber; import reactor.core.publisher.Flux; +import reactor.core.publisher.Hooks; +import reactor.core.publisher.Operators; +import reactor.util.context.Context; import java.util.NoSuchElementException; import java.util.concurrent.BlockingDeque; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static com.mongodb.ClusterFixture.TIMEOUT; import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorClose; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorOpen; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.isWaitForBatchCursorCreationEnabled; class SyncMongoCursor implements MongoCursor { private static final Object COMPLETED = new Object(); private final BlockingDeque results = new LinkedBlockingDeque<>(); + private final CompletableFuture batchCursorCompletableFuture = new CompletableFuture<>(); private final Integer batchSize; private int countToBatchSize; private Subscription subscription; @@ -51,6 +61,15 @@ class SyncMongoCursor implements MongoCursor { SyncMongoCursor(final Publisher publisher, @Nullable final Integer batchSize) { this.batchSize = batchSize; CountDownLatch latch = new CountDownLatch(1); + + if (isWaitForBatchCursorCreationEnabled()) { + // This hook allows us to intercept the `onNext` and `onError` signals for any operation to determine + // whether the {@link BatchCursor} was created successfully or if an error occurred during its creation process. + // The result is propagated to a {@link CompletableFuture}, which we use to block until it is completed. 
+ Hooks.onEachOperator(Operators.lift((sc, sub) -> + new BatchCursorInterceptSubscriber(sub, batchCursorCompletableFuture))); + } + //noinspection ReactiveStreamsSubscriberImplementation Flux.from(publisher).contextWrite(CONTEXT).subscribe(new Subscriber() { @Override @@ -83,9 +102,19 @@ public void onComplete() { if (!latch.await(TIMEOUT, TimeUnit.SECONDS)) { throw new MongoTimeoutException("Timeout waiting for subscription"); } + if (isWaitForBatchCursorCreationEnabled()) { + batchCursorCompletableFuture.get(TIMEOUT, TimeUnit.SECONDS); + Hooks.resetOnEachOperator(); + } sleep(getSleepAfterCursorOpen()); } catch (InterruptedException e) { throw interruptAndCreateMongoInterruptedException("Interrupted waiting for asynchronous cursor establishment", e); + } catch (ExecutionException | TimeoutException e) { + Throwable cause = e.getCause(); + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } + throw new RuntimeException(e); } } @@ -181,4 +210,49 @@ private RuntimeException translateError(final Throwable throwable) { } return new RuntimeException(throwable); } + + + private static final class BatchCursorInterceptSubscriber implements CoreSubscriber { + + private final CoreSubscriber sub; + private final CompletableFuture batchCursorCompletableFuture; + + BatchCursorInterceptSubscriber(final CoreSubscriber sub, + final CompletableFuture batchCursorCompletableFuture) { + this.sub = sub; + this.batchCursorCompletableFuture = batchCursorCompletableFuture; + } + + @Override + public Context currentContext() { + return sub.currentContext(); + } + + @Override + public void onSubscribe(final Subscription s) { + sub.onSubscribe(s); + } + + @Override + public void onNext(final Object o) { + if (o instanceof BatchCursor) { + // Interception of a cursor means that it has been created at this point. + batchCursorCompletableFuture.complete(o); + } + sub.onNext(o); + } + + @Override + public void onError(final Throwable t) { + if (!batchCursorCompletableFuture.isDone()) { // Cursor has not been created yet but an error occurred. 
+ batchCursorCompletableFuture.completeExceptionally(t); + } + sub.onError(t); + } + + @Override + public void onComplete() { + sub.onComplete(); + } + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java index fc7b196e1c8..f1b3c435b4b 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java @@ -29,17 +29,16 @@ import java.util.List; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableSleep; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableWaitForBatchCursorCreation; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorOpen; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableWaitForBatchCursorCreation; import static org.junit.Assume.assumeFalse; public final class ChangeStreamsTest extends UnifiedReactiveStreamsTest { private static final List ERROR_REQUIRED_FROM_CHANGE_STREAM_INITIALIZATION_TESTS = Arrays.asList( - "Test with document comment - pre 4.4", - "Change Stream should error when an invalid aggregation stage is passed in", - "The watch helper must not throw a custom exception when executed against a single server topology, " - + "but instead depend on a server error" + "Test with document comment - pre 4.4" ); private static final List EVENT_SENSITIVE_TESTS = @@ -48,6 +47,14 @@ public final class ChangeStreamsTest extends UnifiedReactiveStreamsTest { "Test that comment is not set on getMore - pre 4.4" ); + private static final List REQUIRES_BATCH_CURSOR_CREATION_WAITING = + Arrays.asList( + "Change Stream should error when an invalid aggregation stage is passed in", + "The watch helper must not throw a custom exception when executed against a single server topology, " + + "but instead depend on a server error" + ); + + public ChangeStreamsTest(@SuppressWarnings("unused") final String fileDescription, @SuppressWarnings("unused") final String testDescription, final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, @@ -58,12 +65,17 @@ public ChangeStreamsTest(@SuppressWarnings("unused") final String fileDescriptio assumeFalse(EVENT_SENSITIVE_TESTS.contains(testDescription)); enableSleepAfterCursorOpen(256); + + if (REQUIRES_BATCH_CURSOR_CREATION_WAITING.contains(testDescription)) { + enableWaitForBatchCursorCreation(); + } } @After public void cleanUp() { super.cleanUp(); disableSleep(); + disableWaitForBatchCursorCreation(); } @Parameterized.Parameters(name = "{0}: {1}") From 99a0c1e0fc40891ec51f43e2ccce249bd54eec7d Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Tue, 14 May 2024 12:07:33 -0600 Subject: [PATCH 10/90] Add empty SBOM Lite (#1387) JAVA-5449 --- sbom.json | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 sbom.json diff --git a/sbom.json b/sbom.json new file mode 100644 index 00000000000..ddfc1b15e9a --- /dev/null +++ b/sbom.json @@ -0,0 +1,7 @@ +{ + "serialNumber": "urn:uuid:a291eaa6-9c96-4c46-9fb1-474f745cf6f5", + "version": 1, + "$schema": "http://cyclonedx.org/schema/bom-1.5.schema.json", + "bomFormat": "CycloneDX", + "specVersion": "1.5" +} From 4a44a001db9ae9c64a4e52fcc0c24ff2e7bef28e Mon Sep 17 
00:00:00 2001 From: Jeff Yemin Date: Thu, 16 May 2024 07:51:47 -0400 Subject: [PATCH 11/90] Convert legacy retryable reads tests to unified format (#1330) JAVA-5344 --- .../test/resources/retryable-reads/README.rst | 178 -- .../retryable-reads/aggregate-merge.json | 98 -- .../aggregate-serverErrors.json | 1208 -------------- .../resources/retryable-reads/aggregate.json | 406 ----- ...angeStreams-client.watch-serverErrors.json | 740 --------- .../changeStreams-client.watch.json | 209 --- ...ngeStreams-db.coll.watch-serverErrors.json | 690 -------- .../changeStreams-db.coll.watch.json | 197 --- .../changeStreams-db.watch-serverErrors.json | 690 -------- .../changeStreams-db.watch.json | 197 --- .../retryable-reads/count-serverErrors.json | 586 ------- .../test/resources/retryable-reads/count.json | 179 --- .../countDocuments-serverErrors.json | 911 ----------- .../retryable-reads/countDocuments.json | 257 --- .../distinct-serverErrors.json | 838 ---------- .../resources/retryable-reads/distinct.json | 245 --- .../estimatedDocumentCount-serverErrors.json | 546 ------- .../estimatedDocumentCount.json | 166 -- .../retryable-reads/find-serverErrors.json | 962 ----------- .../test/resources/retryable-reads/find.json | 348 ---- .../retryable-reads/findOne-serverErrors.json | 732 --------- .../resources/retryable-reads/findOne.json | 223 --- .../gridfs-download-serverErrors.json | 925 ----------- .../retryable-reads/gridfs-download.json | 270 ---- .../gridfs-downloadByName-serverErrors.json | 849 ---------- .../gridfs-downloadByName.json | 250 --- .../listCollectionNames-serverErrors.json | 502 ------ .../retryable-reads/listCollectionNames.json | 150 -- .../listCollectionObjects-serverErrors.json | 502 ------ .../listCollectionObjects.json | 150 -- .../listCollections-serverErrors.json | 502 ------ .../retryable-reads/listCollections.json | 150 -- .../listDatabaseNames-serverErrors.json | 502 ------ .../retryable-reads/listDatabaseNames.json | 150 -- .../listDatabaseObjects-serverErrors.json | 502 ------ .../retryable-reads/listDatabaseObjects.json | 150 -- .../listDatabases-serverErrors.json | 502 ------ .../retryable-reads/listDatabases.json | 150 -- .../listIndexNames-serverErrors.json | 527 ------ .../retryable-reads/listIndexNames.json | 156 -- .../listIndexes-serverErrors.json | 527 ------ .../retryable-reads/listIndexes.json | 156 -- .../resources/retryable-reads/mapReduce.json | 189 --- .../retryable-reads/aggregate-merge.json | 143 ++ .../aggregate-serverErrors.json | 1430 +++++++++++++++++ .../retryable-reads/aggregate.json | 527 ++++++ ...angeStreams-client.watch-serverErrors.json | 959 +++++++++++ .../changeStreams-client.watch.json | 294 ++++ ...ngeStreams-db.coll.watch-serverErrors.json | 944 +++++++++++ .../changeStreams-db.coll.watch.json | 314 ++++ .../changeStreams-db.watch-serverErrors.json | 930 +++++++++++ .../changeStreams-db.watch.json | 303 ++++ .../retryable-reads/count-serverErrors.json | 808 ++++++++++ .../retryable-reads/count.json | 286 ++++ .../countDocuments-serverErrors.json | 1133 +++++++++++++ .../retryable-reads/countDocuments.json | 364 +++++ .../distinct-serverErrors.json | 1060 ++++++++++++ .../retryable-reads/distinct.json | 352 ++++ .../estimatedDocumentCount-serverErrors.json | 768 +++++++++ .../estimatedDocumentCount.json | 273 ++++ .../retryable-reads/exceededTimeLimit.json | 147 ++ .../retryable-reads/find-serverErrors.json | 1184 ++++++++++++++ .../retryable-reads/find.json | 498 ++++++ .../retryable-reads/findOne-serverErrors.json | 954 +++++++++++ 
.../retryable-reads/findOne.json | 330 ++++ .../gridfs-download-serverErrors.json | 1092 +++++++++++++ .../retryable-reads/gridfs-download.json | 367 +++++ .../gridfs-downloadByName-serverErrors.json | 1016 ++++++++++++ .../gridfs-downloadByName.json | 347 ++++ .../listCollectionNames-serverErrors.json | 710 ++++++++ .../retryable-reads/listCollectionNames.json | 243 +++ .../listCollectionObjects-serverErrors.json | 710 ++++++++ .../listCollectionObjects.json | 243 +++ .../listCollections-serverErrors.json | 710 ++++++++ .../retryable-reads/listCollections.json | 243 +++ .../listDatabaseNames-serverErrors.json | 696 ++++++++ .../retryable-reads/listDatabaseNames.json | 229 +++ .../listDatabaseObjects-serverErrors.json | 696 ++++++++ .../retryable-reads/listDatabaseObjects.json | 229 +++ .../listDatabases-serverErrors.json | 696 ++++++++ .../retryable-reads/listDatabases.json | 229 +++ .../listIndexNames-serverErrors.json | 749 +++++++++ .../retryable-reads/listIndexNames.json | 263 +++ .../listIndexes-serverErrors.json | 749 +++++++++ .../retryable-reads/listIndexes.json | 263 +++ .../retryable-reads/mapReduce.json | 284 ++++ .../client/RetryableReadsTest.java | 48 - .../unified/UnifiedRetryableReadsTest.java | 25 +- .../mongodb/scala/RetryableReadsTest.scala | 44 - .../client/AbstractRetryableReadsTest.java | 332 ---- .../mongodb/client/RetryableReadsTest.java | 34 - .../unified/UnifiedRetryableReadsTest.java | 20 +- .../mongodb/client/unified/UnifiedTest.java | 8 +- 93 files changed, 24807 insertions(+), 18836 deletions(-) delete mode 100644 driver-core/src/test/resources/retryable-reads/README.rst delete mode 100644 driver-core/src/test/resources/retryable-reads/aggregate-merge.json delete mode 100644 driver-core/src/test/resources/retryable-reads/aggregate-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/aggregate.json delete mode 100644 driver-core/src/test/resources/retryable-reads/changeStreams-client.watch-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/changeStreams-client.watch.json delete mode 100644 driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch.json delete mode 100644 driver-core/src/test/resources/retryable-reads/changeStreams-db.watch-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/changeStreams-db.watch.json delete mode 100644 driver-core/src/test/resources/retryable-reads/count-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/count.json delete mode 100644 driver-core/src/test/resources/retryable-reads/countDocuments-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/countDocuments.json delete mode 100644 driver-core/src/test/resources/retryable-reads/distinct-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/distinct.json delete mode 100644 driver-core/src/test/resources/retryable-reads/estimatedDocumentCount-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/estimatedDocumentCount.json delete mode 100644 driver-core/src/test/resources/retryable-reads/find-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/find.json delete mode 100644 driver-core/src/test/resources/retryable-reads/findOne-serverErrors.json delete mode 100644 
driver-core/src/test/resources/retryable-reads/findOne.json delete mode 100644 driver-core/src/test/resources/retryable-reads/gridfs-download-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/gridfs-download.json delete mode 100644 driver-core/src/test/resources/retryable-reads/gridfs-downloadByName-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/gridfs-downloadByName.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listCollectionNames-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listCollectionNames.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listCollectionObjects-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listCollectionObjects.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listCollections-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listCollections.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listDatabaseNames-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listDatabaseNames.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listDatabaseObjects-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listDatabaseObjects.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listDatabases-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listDatabases.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listIndexNames-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listIndexNames.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listIndexes-serverErrors.json delete mode 100644 driver-core/src/test/resources/retryable-reads/listIndexes.json delete mode 100644 driver-core/src/test/resources/retryable-reads/mapReduce.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-merge.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/count-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/count.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments.json create mode 100644 
driver-core/src/test/resources/unified-test-format/retryable-reads/distinct-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/distinct.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/exceededTimeLimit.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/find-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/find.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/findOne-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/findOne.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes-serverErrors.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes.json create mode 100644 driver-core/src/test/resources/unified-test-format/retryable-reads/mapReduce.json delete mode 100644 driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsTest.java delete mode 100644 
driver-scala/src/integration/scala/org/mongodb/scala/RetryableReadsTest.scala delete mode 100644 driver-sync/src/test/functional/com/mongodb/client/AbstractRetryableReadsTest.java delete mode 100644 driver-sync/src/test/functional/com/mongodb/client/RetryableReadsTest.java diff --git a/driver-core/src/test/resources/retryable-reads/README.rst b/driver-core/src/test/resources/retryable-reads/README.rst deleted file mode 100644 index 5efb3a2e137..00000000000 --- a/driver-core/src/test/resources/retryable-reads/README.rst +++ /dev/null @@ -1,178 +0,0 @@ -===================== -Retryable Reads Tests -===================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Retryable Reads spec. - -Prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Tests will require a MongoClient created with options defined in the tests. -Integration tests will require a running MongoDB cluster with server versions -4.0 or later. - -N.B. The spec specifies 3.6 as the minimum server version: however, -``failCommand`` is not supported on 3.6, so for now, testing requires MongoDB -4.0. Once `DRIVERS-560`_ is resolved, we will attempt to adapt its live failure -integration tests to test Retryable Reads on MongoDB 3.6. - -.. _DRIVERS-560: https://jira.mongodb.org/browse/DRIVERS-560 - -Server Fail Point -================= - -See: `Server Fail Point`_ in the Transactions spec test suite. - -.. _Server Fail Point: ../../transactions/tests#server-fail-point - -Disabling Fail Point after Test Execution ------------------------------------------ - -After each test that configures a fail point, drivers should disable the -``failCommand`` fail point to avoid spurious failures in -subsequent tests. The fail point may be disabled like so:: - - db.runCommand({ - configureFailPoint: "failCommand", - mode: "off" - }); - -Network Error Tests -=================== - -Network error tests are expressed in YAML and should be run against a standalone, -shard cluster, or single-node replica set. - - -Test Format ------------ - -Each YAML file has the following keys: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this file should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - - - ``topology`` (optional): An array of server topologies against which the - tests can be run successfully. Valid topologies are "single", - "replicaset", "sharded", and "load-balanced". If this field is omitted, - the default is all topologies (i.e. ``["single", "replicaset", "sharded", - "load-balanced"]``). 
- -- ``database_name`` and ``collection_name``: Optional. The database and - collection to use for testing. - -- ``bucket_name``: Optional. The GridFS bucket name to use for testing. - -- ``data``: The data that should exist in the collection(s) under test before - each test run. This will typically be an array of documents to be inserted - into the collection under test (i.e. ``collection_name``); however, this field - may also be an object mapping collection names to arrays of documents to be - inserted into the specified collection. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this - test should be initialized with multiple mongos seed addresses. If ``false`` - or omitted, only a single mongos address should be specified. This field has - no effect for non-sharded topologies. - - - ``skipReason``: Optional, string describing why this test should be skipped. - - - ``failPoint``: Optional, a server fail point to enable, expressed as the - configureFailPoint command to run on the admin database. - - - ``operations``: An array of documents describing an operation to be - executed. Each document has the following fields: - - - ``name``: The name of the operation on ``object``. - - - ``object``: The name of the object to perform the operation on. Can be - "database", "collection", "client", or "gridfsbucket." - - - ``arguments``: Optional, the names and values of arguments. - - - ``result``: Optional. The return value from the operation, if any. This - field may be a scalar (e.g. in the case of a count), a single document, or - an array of documents in the case of a multi-document read. - - - ``error``: Optional. If ``true``, the test should expect an error or - exception. - - - ``expectations``: Optional list of command-started events. - -GridFS Tests ------------- - -GridFS tests are denoted by when the YAML file contains ``bucket_name``. -The ``data`` field will also be an object, which maps collection names -(e.g. ``fs.files``) to an array of documents that should be inserted into -the specified collection. - -``fs.files`` and ``fs.chunks`` should be created in the database -specified by ``database_name``. This could be done via inserts or by -creating GridFSBuckets—using the GridFS ``bucketName`` (see -`GridFSBucket spec`_) specified by ``bucket_name`` field in the YAML -file—and calling ``upload_from_stream_with_id`` with the appropriate -data. - -``Download`` tests should be tested against ``GridFS.download_to_stream``. -``DownloadByName`` tests should be tested against -``GridFS.download_to_stream_by_name``. - - -.. _GridFSBucket spec: https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#configurable-gridfsbucket-class - -Speeding Up Tests ------------------ - -Drivers can greatly reduce the execution time of tests by setting `heartbeatFrequencyMS`_ -and `minHeartbeatFrequencyMS`_ (internally) to a small value (e.g. 5ms), below what -is normally permitted in the SDAM spec. If a test specifies an explicit value for -heartbeatFrequencyMS (e.g. client or URI options), drivers MUST use that value. - -.. _minHeartbeatFrequencyMS: ../../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#minheartbeatfrequencyms -.. 
_heartbeatFrequencyMS: ../../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#heartbeatfrequencyms - -Optional Enumeration Commands -============================= - -A driver only needs to test the optional enumeration commands it has chosen to -implement (e.g. ``Database.listCollectionNames()``). - -Changelog -========= - -:2019-03-19: Add top-level ``runOn`` field to denote server version and/or - topology requirements requirements for the test file. Removes the - ``minServerVersion`` and ``topology`` top-level fields, which are - now expressed within ``runOn`` elements. - - Add test-level ``useMultipleMongoses`` field. - -:2020-09-16: Suggest lowering heartbeatFrequencyMS in addition to minHeartbeatFrequencyMS. diff --git a/driver-core/src/test/resources/retryable-reads/aggregate-merge.json b/driver-core/src/test/resources/retryable-reads/aggregate-merge.json deleted file mode 100644 index b401d741ba5..00000000000 --- a/driver-core/src/test/resources/retryable-reads/aggregate-merge.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.11" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate with $merge does not retry", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$merge": { - "into": "output-collection" - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$merge": { - "into": "output-collection" - } - } - ] - }, - "command_name": "aggregate", - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/aggregate-serverErrors.json b/driver-core/src/test/resources/retryable-reads/aggregate-serverErrors.json deleted file mode 100644 index 1155f808dcc..00000000000 --- a/driver-core/src/test/resources/retryable-reads/aggregate-serverErrors.json +++ /dev/null @@ -1,1208 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - 
"$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NotPrimaryOrSecondary", - "failPoint": { 
- "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { 
- "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails after two NotWritablePrimary errors", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/aggregate.json b/driver-core/src/test/resources/retryable-reads/aggregate.json deleted file mode 100644 index f23d5c67939..00000000000 --- a/driver-core/src/test/resources/retryable-reads/aggregate.json +++ /dev/null @@ -1,406 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate succeeds on first attempt", - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate with $out does not retry", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$out": "output-collection" - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$out": "output-collection" - } - ] - }, - "command_name": "aggregate", - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/changeStreams-client.watch-serverErrors.json b/driver-core/src/test/resources/retryable-reads/changeStreams-client.watch-serverErrors.json deleted file mode 100644 index 73dbfee916f..00000000000 --- a/driver-core/src/test/resources/retryable-reads/changeStreams-client.watch-serverErrors.json +++ /dev/null @@ -1,740 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - 
"sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "client.watch succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - 
"name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NetworkTimeout", - "failPoint": { 
- "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/changeStreams-client.watch.json b/driver-core/src/test/resources/retryable-reads/changeStreams-client.watch.json deleted file mode 100644 index 30a53037ad2..00000000000 --- a/driver-core/src/test/resources/retryable-reads/changeStreams-client.watch.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "client.watch succeeds 
on first attempt", - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch-serverErrors.json b/driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch-serverErrors.json deleted file mode 100644 index 77b3af04f45..00000000000 --- a/driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch-serverErrors.json +++ /dev/null @@ -1,690 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "db.coll.watch succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - 
], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - 
"aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", 
- "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch.json b/driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch.json deleted file mode 100644 index 27f6105a4bb..00000000000 --- a/driver-core/src/test/resources/retryable-reads/changeStreams-db.coll.watch.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "db.coll.watch succeeds on first attempt", - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" 
- } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/changeStreams-db.watch-serverErrors.json b/driver-core/src/test/resources/retryable-reads/changeStreams-db.watch-serverErrors.json deleted file mode 100644 index 7a875345080..00000000000 --- a/driver-core/src/test/resources/retryable-reads/changeStreams-db.watch-serverErrors.json +++ /dev/null @@ -1,690 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "db.watch succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": 
"retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - 
"database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] 
- }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/changeStreams-db.watch.json b/driver-core/src/test/resources/retryable-reads/changeStreams-db.watch.json deleted file mode 100644 index e6b0b9b781e..00000000000 --- a/driver-core/src/test/resources/retryable-reads/changeStreams-db.watch.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "db.watch succeeds on first attempt", - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/count-serverErrors.json b/driver-core/src/test/resources/retryable-reads/count-serverErrors.json deleted file mode 100644 index 36a0c17cab0..00000000000 --- a/driver-core/src/test/resources/retryable-reads/count-serverErrors.json +++ /dev/null @@ -1,586 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "Count succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - 
"database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/count.json b/driver-core/src/test/resources/retryable-reads/count.json deleted file mode 100644 index 139a5451318..00000000000 --- a/driver-core/src/test/resources/retryable-reads/count.json +++ /dev/null @@ -1,179 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "Count succeeds on first attempt", - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": 
"retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/countDocuments-serverErrors.json b/driver-core/src/test/resources/retryable-reads/countDocuments-serverErrors.json deleted file mode 100644 index 782ea5e4f18..00000000000 --- a/driver-core/src/test/resources/retryable-reads/countDocuments-serverErrors.json +++ /dev/null @@ -1,911 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "CountDocuments succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": 
"retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - 
"command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { 
- "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/countDocuments.json b/driver-core/src/test/resources/retryable-reads/countDocuments.json deleted file mode 100644 index 57a64e45b79..00000000000 --- a/driver-core/src/test/resources/retryable-reads/countDocuments.json +++ /dev/null @@ -1,257 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "CountDocuments succeeds on first attempt", - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - 
"command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/distinct-serverErrors.json b/driver-core/src/test/resources/retryable-reads/distinct-serverErrors.json deleted file mode 100644 index d7c6018a624..00000000000 --- a/driver-core/src/test/resources/retryable-reads/distinct-serverErrors.json +++ /dev/null @@ -1,838 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Distinct succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - 
"distinct" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": 
"x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/distinct.json b/driver-core/src/test/resources/retryable-reads/distinct.json deleted file mode 100644 index 1fd415da812..00000000000 --- a/driver-core/src/test/resources/retryable-reads/distinct.json +++ /dev/null @@ -1,245 
+0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Distinct succeeds on first attempt", - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/estimatedDocumentCount-serverErrors.json b/driver-core/src/test/resources/retryable-reads/estimatedDocumentCount-serverErrors.json deleted file mode 100644 index 6bb128f5f37..00000000000 --- a/driver-core/src/test/resources/retryable-reads/estimatedDocumentCount-serverErrors.json +++ /dev/null @@ -1,546 +0,0 @@ -{ - "runOn": [ - 
{ - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - 
"count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/estimatedDocumentCount.json b/driver-core/src/test/resources/retryable-reads/estimatedDocumentCount.json deleted file mode 100644 index 8dfa15a2cdb..00000000000 --- a/driver-core/src/test/resources/retryable-reads/estimatedDocumentCount.json +++ /dev/null @@ -1,166 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds on first attempt", - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on second attempt", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/find-serverErrors.json b/driver-core/src/test/resources/retryable-reads/find-serverErrors.json deleted file mode 100644 index f6b96c6dcb3..00000000000 --- a/driver-core/src/test/resources/retryable-reads/find-serverErrors.json +++ /dev/null @@ -1,962 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "Find succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - 
"_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - 
"filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": 
"find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/find.json b/driver-core/src/test/resources/retryable-reads/find.json deleted file mode 100644 index 00d419c0da6..00000000000 --- a/driver-core/src/test/resources/retryable-reads/find.json +++ /dev/null @@ -1,348 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "Find succeeds on first attempt", - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds on second attempt with explicit clientOptions", - "clientOptions": { - "retryReads": true - }, - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/findOne-serverErrors.json b/driver-core/src/test/resources/retryable-reads/findOne-serverErrors.json deleted file mode 100644 index d039ef247e0..00000000000 --- a/driver-core/src/test/resources/retryable-reads/findOne-serverErrors.json +++ /dev/null @@ -1,732 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": 
"4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "FindOne succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - 
"errorCode": 13436 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - 
"times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/findOne.json b/driver-core/src/test/resources/retryable-reads/findOne.json deleted file mode 100644 index b9deb73d2ab..00000000000 --- a/driver-core/src/test/resources/retryable-reads/findOne.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "FindOne succeeds on first attempt", - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": 
{ - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/gridfs-download-serverErrors.json b/driver-core/src/test/resources/retryable-reads/gridfs-download-serverErrors.json deleted file mode 100644 index cec3a5016a4..00000000000 --- a/driver-core/src/test/resources/retryable-reads/gridfs-download-serverErrors.json +++ /dev/null @@ -1,925 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "Download succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - 
"find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - 
"command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": 
"Download succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - 
"$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/gridfs-download.json b/driver-core/src/test/resources/retryable-reads/gridfs-download.json deleted file mode 100644 index 4d0d5a17e4d..00000000000 --- a/driver-core/src/test/resources/retryable-reads/gridfs-download.json +++ /dev/null @@ -1,270 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "Download succeeds on first attempt", - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - 
"command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/gridfs-downloadByName-serverErrors.json b/driver-core/src/test/resources/retryable-reads/gridfs-downloadByName-serverErrors.json deleted file mode 100644 index a64230d38ab..00000000000 --- a/driver-core/src/test/resources/retryable-reads/gridfs-downloadByName-serverErrors.json +++ /dev/null @@ -1,849 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": 
"1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "DownloadByName succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - 
} - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after HostNotFound", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - 
"files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/gridfs-downloadByName.json b/driver-core/src/test/resources/retryable-reads/gridfs-downloadByName.json deleted file mode 100644 index 48f2168cfc3..00000000000 --- a/driver-core/src/test/resources/retryable-reads/gridfs-downloadByName.json +++ /dev/null @@ -1,250 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "DownloadByName succeeds on first attempt", - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": 
{ - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listCollectionNames-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listCollectionNames-serverErrors.json deleted file mode 100644 index bbdce625ada..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listCollectionNames-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionNames succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "listCollections" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listCollectionNames.json b/driver-core/src/test/resources/retryable-reads/listCollectionNames.json deleted file mode 100644 index 73d96a3cf7a..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listCollectionNames.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionNames succeeds on first attempt", - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - 
}, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listCollectionObjects-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listCollectionObjects-serverErrors.json deleted file mode 100644 index ab469dfe30b..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listCollectionObjects-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionObjects succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - 
"times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listCollectionObjects.json b/driver-core/src/test/resources/retryable-reads/listCollectionObjects.json deleted file mode 100644 index 1fb0f184374..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listCollectionObjects.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionObjects succeeds on first attempt", - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails on first attempt", - "clientOptions": { 
- "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listCollections-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listCollections-serverErrors.json deleted file mode 100644 index def9ac4595c..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listCollections-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollections succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - 
], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - 
"errorCode": 9001 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listCollections.json b/driver-core/src/test/resources/retryable-reads/listCollections.json deleted file mode 100644 index 2427883621c..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listCollections.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollections succeeds on first attempt", - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - 
"name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listDatabaseNames-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listDatabaseNames-serverErrors.json deleted file mode 100644 index 1dd8e4415aa..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listDatabaseNames-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseNames succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - 
} - ] - }, - { - "description": "ListDatabaseNames succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 
- } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listDatabaseNames.json b/driver-core/src/test/resources/retryable-reads/listDatabaseNames.json deleted file mode 100644 index b431f570161..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listDatabaseNames.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseNames succeeds on first attempt", - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listDatabaseObjects-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listDatabaseObjects-serverErrors.json deleted file mode 100644 index bc497bb088c..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listDatabaseObjects-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - 
"minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseObjects succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "listDatabases" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git 
a/driver-core/src/test/resources/retryable-reads/listDatabaseObjects.json b/driver-core/src/test/resources/retryable-reads/listDatabaseObjects.json deleted file mode 100644 index 267fe921cab..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listDatabaseObjects.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseObjects succeeds on first attempt", - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listDatabases-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listDatabases-serverErrors.json deleted file mode 100644 index ed7bcbc3989..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listDatabases-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabases succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - 
"command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 
1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listDatabases.json b/driver-core/src/test/resources/retryable-reads/listDatabases.json deleted file mode 100644 index 69ef9788f8d..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listDatabases.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabases succeeds on first attempt", - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - 
"description": "ListDatabases succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listIndexNames-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listIndexNames-serverErrors.json deleted file mode 100644 index 2d3265ec85d..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listIndexNames-serverErrors.json +++ /dev/null @@ -1,527 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexNames succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { 
- "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": 
"ListIndexNames succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listIndexNames.json b/driver-core/src/test/resources/retryable-reads/listIndexNames.json deleted file mode 100644 index fbdb420f8ad..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listIndexNames.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": 
"retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexNames succeeds on first attempt", - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/listIndexes-serverErrors.json b/driver-core/src/test/resources/retryable-reads/listIndexes-serverErrors.json deleted file mode 100644 index 25c5b0e4483..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listIndexes-serverErrors.json +++ /dev/null @@ -1,527 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexes succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - 
"mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds 
after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} 
diff --git a/driver-core/src/test/resources/retryable-reads/listIndexes.json b/driver-core/src/test/resources/retryable-reads/listIndexes.json deleted file mode 100644 index 5cb620ae45a..00000000000 --- a/driver-core/src/test/resources/retryable-reads/listIndexes.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexes succeeds on first attempt", - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/retryable-reads/mapReduce.json b/driver-core/src/test/resources/retryable-reads/mapReduce.json deleted file mode 100644 index 9327a23052b..00000000000 --- a/driver-core/src/test/resources/retryable-reads/mapReduce.json +++ /dev/null @@ -1,189 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 0 - }, - { - "_id": 2, - "x": 1 - }, - { - "_id": 3, - "x": 2 - } - ], - "tests": [ - { - "description": "MapReduce succeeds with retry on", - "operations": [ - { - "name": "mapReduce", - "object": "collection", - "arguments": { - "map": { - "$code": "function inc() { 
return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "result": [ - { - "_id": 0, - "value": 6 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "mapReduce": "coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "MapReduce fails with retry on", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "mapReduce" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "mapReduce", - "object": "collection", - "arguments": { - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "mapReduce": "coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "MapReduce fails with retry off", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "mapReduce" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "mapReduce", - "object": "collection", - "arguments": { - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "mapReduce": "coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-merge.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-merge.json new file mode 100644 index 00000000000..96bbd0fc386 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-merge.json @@ -0,0 +1,143 @@ +{ + "description": "aggregate-merge", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 
3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-serverErrors.json new file mode 100644 index 00000000000..d39835a5d36 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate-serverErrors.json @@ -0,0 +1,1430 @@ +{ + "description": "aggregate-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + 
"description": "Aggregate succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": 
"Aggregate succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after HostNotFound", 
+ "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": 
"retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate.json new file mode 100644 index 00000000000..2b504c8d49f --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/aggregate.json @@ -0,0 +1,527 @@ +{ + "description": "aggregate", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": 
"retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $out does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "output-collection" + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "output-collection" + } + ] + }, + "commandName": "aggregate", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch-serverErrors.json new file mode 100644 index 00000000000..47375974d29 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch-serverErrors.json @@ -0,0 +1,959 @@ +{ + 
"description": "changeStreams-client.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "tests": [ + { + "description": "client.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { 
+ "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + 
"allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + 
"$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch.json new file mode 100644 index 00000000000..95ddaf921d6 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-client.watch.json @@ -0,0 +1,294 @@ +{ + "description": "changeStreams-client.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "tests": [ + { + "description": "client.watch succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + 
"pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch-serverErrors.json 
b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch-serverErrors.json new file mode 100644 index 00000000000..589d0a3c37a --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch-serverErrors.json @@ -0,0 +1,944 @@ +{ + "description": "changeStreams-db.coll.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.coll.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + 
"errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": 
{ + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch.json new file mode 100644 index 00000000000..bbea2ffe4fe --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.coll.watch.json @@ -0,0 +1,314 @@ +{ + "description": "changeStreams-db.coll.watch", + "schemaVersion": "1.9", + 
"runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.coll.watch succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", 
+ "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch-serverErrors.json new file mode 100644 index 00000000000..6c12d7ddd86 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch-serverErrors.json @@ -0,0 +1,930 @@ +{ + "description": "changeStreams-db.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + 
"arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": 
"retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch.json new file mode 100644 index 00000000000..1b6d911c76e --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/changeStreams-db.watch.json @@ -0,0 +1,303 @@ +{ + "description": "changeStreams-db.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.watch succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/count-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/count-serverErrors.json new file mode 100644 index 00000000000..c52edfdb988 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/count-serverErrors.json @@ -0,0 +1,808 @@ +{ + "description": "count-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Count succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": 
"collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + 
"description": "Count succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/count.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/count.json new file mode 100644 index 00000000000..d5c9a343a9a --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/count.json @@ -0,0 +1,286 @@ +{ + "description": "count", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Count succeeds on first 
attempt", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments-serverErrors.json new file mode 100644 index 00000000000..fd028b114c1 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments-serverErrors.json @@ -0,0 +1,1133 @@ +{ + "description": "countDocuments-serverErrors", + 
"schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "CountDocuments succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 
1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + 
"name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ 
+ { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": 
[ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments.json new file mode 100644 index 00000000000..e06e89c1ad6 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/countDocuments.json @@ -0,0 +1,364 @@ +{ + "description": "countDocuments", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "CountDocuments succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + 
"collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/distinct-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/distinct-serverErrors.json new file mode 100644 index 00000000000..79d2d5fc31c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/distinct-serverErrors.json @@ -0,0 +1,1060 @@ +{ + "description": "distinct-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + 
"_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotPrimaryOrSecondary", + "operations": [ + { + 
"object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + 
"databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": 
"distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/distinct.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/distinct.json new file mode 100644 index 00000000000..81f1f66e917 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/distinct.json @@ -0,0 +1,352 @@ +{ + "description": "distinct", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + 
"_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount-serverErrors.json 
b/driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount-serverErrors.json new file mode 100644 index 00000000000..ba983c6cdf0 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount-serverErrors.json @@ -0,0 +1,768 @@ +{ + "description": "estimatedDocumentCount-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds 
after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": 
"createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount.json new file mode 100644 index 00000000000..75a676b9b61 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/estimatedDocumentCount.json @@ -0,0 +1,273 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": 
"createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/exceededTimeLimit.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/exceededTimeLimit.json new file mode 100644 index 00000000000..8d090bbe3f6 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/exceededTimeLimit.json @@ -0,0 +1,147 @@ +{ + "description": "ExceededTimeLimit is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "exceededtimelimit-test" + } + } + ], + "initialData": [ + { + "collectionName": "exceededtimelimit-test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 262 + } + } + } + }, + { + "name": "find", + "arguments": { + 
"filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/find-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/find-serverErrors.json new file mode 100644 index 00000000000..ab3dbe45f4f --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/find-serverErrors.json @@ -0,0 +1,1184 @@ +{ + "description": "find-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { 
+ "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, 
+ { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/find.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/find.json new file mode 100644 index 00000000000..30c4c5e4787 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/find.json @@ -0,0 +1,498 @@ +{ + "description": "find", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + 
] + }, + { + "description": "Find succeeds on second attempt with explicit clientOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + 
"filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/findOne-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/findOne-serverErrors.json new file mode 100644 index 00000000000..7adda1e32b6 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/findOne-serverErrors.json @@ -0,0 +1,954 @@ +{ + "description": "findOne-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "FindOne succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": 
"FindOne succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne 
succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NetworkTimeout", + "operations": [ + { + 
"object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + 
} + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/findOne.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/findOne.json new file mode 100644 index 00000000000..4314a19e46f --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/findOne.json @@ -0,0 +1,330 @@ +{ + "description": "findOne", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "FindOne succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download-serverErrors.json new file mode 100644 index 00000000000..5bb7eee0b23 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download-serverErrors.json @@ -0,0 +1,1092 @@ +{ + "description": "gridfs-download-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Download succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + 
} + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, 
+ "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket1", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git 
a/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download.json new file mode 100644 index 00000000000..69fe8ff7c85 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-download.json @@ -0,0 +1,367 @@ +{ + "description": "gridfs-download", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Download succeeds on first attempt", + "operations": [ + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + 
{ + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket1", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName-serverErrors.json new file mode 100644 index 00000000000..35f7e1e563f --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName-serverErrors.json @@ -0,0 +1,1016 @@ +{ + "description": "gridfs-downloadByName-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { 
+ "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "DownloadByName succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + 
"filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after SocketException", + 
"operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket1", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName.json new file mode 100644 index 00000000000..c3fa873396f --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/gridfs-downloadByName.json @@ -0,0 +1,347 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": 
"1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "DownloadByName succeeds on first attempt", + "operations": [ + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + 
}, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket1", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames-serverErrors.json new file mode 100644 index 00000000000..162dd4cee08 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollectionNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + 
} + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + 
"listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames.json new file mode 100644 index 00000000000..0fe575f7a6d --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionNames.json @@ -0,0 +1,243 @@ +{ + "description": "listCollectionNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionNames succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", 
+ "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects-serverErrors.json new file mode 100644 index 00000000000..8b9d582c102 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollectionObjects-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionObjects succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, 
+ { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + 
"commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects.json new file mode 100644 index 00000000000..9cdbb692763 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollectionObjects.json @@ -0,0 +1,243 @@ +{ + "description": "listCollectionObjects", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionObjects succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + 
"commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections-serverErrors.json new file mode 100644 index 00000000000..171fe7457f0 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollections-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollections succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + 
], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + 
] + }, + { + "description": "ListCollections fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections.json new file mode 100644 index 00000000000..b6152f9ce53 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listCollections.json @@ -0,0 +1,243 @@ +{ + "description": "listCollections", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollections succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, 
+ { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames-serverErrors.json new file mode 100644 index 00000000000..489ff0ad512 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabaseNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": 
{ + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + 
"arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + 
"observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames.json new file mode 100644 index 00000000000..5590f39a51e --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseNames.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabaseNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseNames succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects-serverErrors.json new file mode 100644 index 00000000000..56f9f362363 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabaseObjects-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseObjects succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": 
{ + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + 
"name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git 
a/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects.json new file mode 100644 index 00000000000..46b1511d46c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabaseObjects.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabaseObjects", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseObjects succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git 
a/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases-serverErrors.json new file mode 100644 index 00000000000..09b935a59f4 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabases-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabases succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + 
"commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", 
+ "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases.json new file mode 100644 index 00000000000..4cf5eccc7bd --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listDatabases.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabases", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + 
], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabases succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames-serverErrors.json new file mode 100644 index 00000000000..7b98111480c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames-serverErrors.json @@ -0,0 +1,749 @@ +{ + "description": "listIndexNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + 
"id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + 
"client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": 
"retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] 
+} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames.json new file mode 100644 index 00000000000..c5fe967ff57 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexNames.json @@ -0,0 +1,263 @@ +{ + "description": "listIndexNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexNames succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes-serverErrors.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes-serverErrors.json new file mode 100644 index 00000000000..0110a0acd0b --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes-serverErrors.json @@ -0,0 +1,749 @@ +{ + "description": "listIndexes-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexes succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + 
"client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": 
"retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes.json new file mode 100644 index 00000000000..2560e4961cc --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/listIndexes.json @@ -0,0 +1,263 @@ +{ + "description": "listIndexes", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexes succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" 
+ } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/retryable-reads/mapReduce.json b/driver-core/src/test/resources/unified-test-format/retryable-reads/mapReduce.json new file mode 100644 index 00000000000..745c0ef001a --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/retryable-reads/mapReduce.json @@ -0,0 +1,284 @@ +{ + "description": "mapReduce", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 0 + }, + { + "_id": 2, + "x": 1 + }, + { + "_id": 3, + "x": 2 + } + ] + } + ], + "tests": [ + { + "description": "MapReduce succeeds with retry on", + "operations": [ + { + "object": "collection0", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, 
this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectResult": [ + { + "_id": 0, + "value": 6 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "MapReduce fails with retry on", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "mapReduce" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "MapReduce fails with retry off", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "mapReduce" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsTest.java deleted 
file mode 100644 index 84bed3cd28c..00000000000 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsTest.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.reactivestreams.client; - -import com.mongodb.MongoClientSettings; -import com.mongodb.client.AbstractRetryableReadsTest; -import com.mongodb.client.MongoClient; -import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.bson.BsonString; - -import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT_PROVIDER; -import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.assertContextPassedThrough; - -public class RetryableReadsTest extends AbstractRetryableReadsTest { - public RetryableReadsTest(final String filename, final String description, final String databaseName, final String collectionName, - final BsonArray data, final BsonString bucketName, final BsonDocument definition, final boolean skipTest) { - super(filename, description, databaseName, collectionName, data, bucketName, definition, skipTest); - } - - @Override - protected MongoClient createMongoClient(final MongoClientSettings settings) { - return new SyncMongoClient(MongoClients.create( - MongoClientSettings.builder(settings).contextProvider(CONTEXT_PROVIDER).build() - )); - } - - @Override - public void shouldPassAllOutcomes() { - super.shouldPassAllOutcomes(); - assertContextPassedThrough(getDefinition()); - } -} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java index 845c838f3fb..540cb0673bb 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java @@ -18,21 +18,34 @@ import org.bson.BsonArray; import org.bson.BsonDocument; +import org.junit.After; import org.junit.runners.Parameterized; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static com.mongodb.client.unified.UnifiedRetryableReadsTest.customSkips; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableWaitForBatchCursorCreation; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableWaitForBatchCursorCreation; public class UnifiedRetryableReadsTest extends UnifiedReactiveStreamsTest { - public UnifiedRetryableReadsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray 
runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { + public UnifiedRetryableReadsTest(final String fileDescription, final String testDescription, final String schemaVersion, + final BsonArray runOnRequirements, final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - assumeFalse(testDescription.contains("createChangeStream succeeds after retryable handshake")); + customSkips(fileDescription, testDescription); + if (fileDescription.startsWith("changeStreams") || testDescription.contains("ChangeStream")) { + // Several reactive change stream tests fail if we don't block waiting for batch cursor creation. + enableWaitForBatchCursorCreation(); + // The reactive driver will execute extra getMore commands for change streams. Ignore them. + ignoreExtraEvents(); + } + } + + @After + public void cleanUp() { + super.cleanUp(); + disableWaitForBatchCursorCreation(); } @Parameterized.Parameters(name = "{0}: {1}") diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/RetryableReadsTest.scala b/driver-scala/src/integration/scala/org/mongodb/scala/RetryableReadsTest.scala deleted file mode 100644 index 7cc8a971cd9..00000000000 --- a/driver-scala/src/integration/scala/org/mongodb/scala/RetryableReadsTest.scala +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.mongodb.scala - -import com.mongodb.client.AbstractRetryableReadsTest -import org.bson.{ BsonArray, BsonDocument, BsonString } -import org.mongodb.scala.syncadapter.SyncMongoClient - -class RetryableReadsTests( - val filename: String, - val description: String, - val databaseName: String, - val collectionName: String, - val data: BsonArray, - val bucketName: BsonString, - val definition: BsonDocument, - val skipTest: Boolean -) extends AbstractRetryableReadsTest( - filename, - description, - databaseName, - collectionName, - data, - bucketName, - definition, - skipTest - ) { - override protected def createMongoClient(settings: com.mongodb.MongoClientSettings) = - SyncMongoClient(MongoClient(settings)) -} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractRetryableReadsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractRetryableReadsTest.java deleted file mode 100644 index 1df7174e246..00000000000 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractRetryableReadsTest.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.client; - -import com.mongodb.ConnectionString; -import com.mongodb.MongoClientSettings; -import com.mongodb.MongoException; -import com.mongodb.MongoNamespace; -import com.mongodb.ReadConcern; -import com.mongodb.ReadConcernLevel; -import com.mongodb.ReadPreference; -import com.mongodb.WriteConcern; -import com.mongodb.client.gridfs.GridFSBucket; -import com.mongodb.client.gridfs.GridFSBuckets; -import com.mongodb.client.test.CollectionHelper; -import com.mongodb.event.CommandEvent; -import com.mongodb.internal.connection.TestCommandListener; -import org.bson.BsonArray; -import org.bson.BsonBinary; -import org.bson.BsonBoolean; -import org.bson.BsonDocument; -import org.bson.BsonInt64; -import org.bson.BsonString; -import org.bson.BsonValue; -import org.bson.Document; -import org.bson.codecs.BsonDocumentCodec; -import org.bson.codecs.DocumentCodec; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import util.Hex; -import util.JsonPoweredTestHelper; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static com.mongodb.ClusterFixture.getConnectionString; -import static com.mongodb.ClusterFixture.getMultiMongosConnectionString; -import static com.mongodb.ClusterFixture.isSharded; -import static com.mongodb.JsonTestServerVersionChecker.skipTest; -import static com.mongodb.client.CommandMonitoringTestHelper.assertEventsEquality; -import static com.mongodb.client.CommandMonitoringTestHelper.getExpectedEvents; -import static com.mongodb.client.Fixture.getDefaultDatabaseName; -import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; -import static java.util.Collections.singletonList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assume.assumeFalse; -import static org.junit.Assume.assumeTrue; - -// See https://github.com/mongodb/specifications/tree/master/source/retryable-writes/tests -@RunWith(Parameterized.class) -public abstract class AbstractRetryableReadsTest { - private final String filename; - private final String description; - private final String databaseName; - private final String collectionName; - private final String gridFSBucketName; - private final BsonDocument gridFSData; - private final BsonArray data; - private final BsonDocument definition; - private final boolean skipTest; - private MongoClient mongoClient; - private CollectionHelper collectionHelper; - private MongoCollection collection; - private final TestCommandListener commandListener; - private JsonPoweredCrudTestHelper helper; - private GridFSBucket gridFSBucket; - private MongoCollection filesCollection; - private MongoCollection chunksCollection; - private boolean useMultipleMongoses = false; - - public AbstractRetryableReadsTest(final String filename, final String description, final String databaseName, - final String collectionName, final BsonArray data, final 
BsonString bucketName, - final BsonDocument definition, final boolean skipTest) { - this.filename = filename; - this.description = description; - this.databaseName = databaseName; - this.collectionName = collectionName; - this.definition = definition; - this.gridFSBucketName = (bucketName != null ? bucketName.getValue() : null); - this.gridFSData = (bucketName != null ? (BsonDocument) data.get(0) : null); - this.data = (bucketName != null ? null : data); - this.commandListener = new TestCommandListener(); - this.skipTest = skipTest; - } - - protected abstract MongoClient createMongoClient(MongoClientSettings settings); - - protected BsonDocument getDefinition() { - return definition; - } - - @Before - public void setUp() { - assumeFalse(skipTest); - assumeTrue("Skipping test: " + definition.getString("skipReason", new BsonString("")).getValue(), - !definition.containsKey("skipReason")); - assumeFalse("Skipping count tests", filename.startsWith("count.") || filename.startsWith("count-")); - assumeFalse("Skipping list index names tests", filename.startsWith("listIndexNames")); - - collectionHelper = new CollectionHelper<>(new DocumentCodec(), new MongoNamespace(databaseName, collectionName)); - BsonDocument clientOptions = definition.getDocument("clientOptions", new BsonDocument()); - - ConnectionString connectionString = getConnectionString(); - useMultipleMongoses = definition.getBoolean("useMultipleMongoses", BsonBoolean.FALSE).getValue(); - if (useMultipleMongoses) { - assumeTrue(isSharded()); - connectionString = getMultiMongosConnectionString(); - assumeTrue("The system property org.mongodb.test.multi.mongos.uri is not set.", connectionString != null); - } - - MongoClientSettings settings = getMongoClientSettingsBuilder() - .applyConnectionString(connectionString) - .addCommandListener(commandListener) - .applyToSocketSettings(builder -> builder.readTimeout(5, TimeUnit.SECONDS)) - .applyToServerSettings(builder -> builder.heartbeatFrequency(5, TimeUnit.MILLISECONDS)) - .writeConcern(getWriteConcern(clientOptions)) - .readConcern(getReadConcern(clientOptions)) - .readPreference(getReadPreference(clientOptions)) - .retryWrites(clientOptions.getBoolean("retryWrites", BsonBoolean.FALSE).getValue()) - .retryReads(clientOptions.getBoolean("retryReads", BsonBoolean.TRUE).getValue()) - .build(); - - mongoClient = createMongoClient(settings); - - if (data != null) { - List documents = new ArrayList<>(); - for (BsonValue document : data) { - documents.add(document.asDocument()); - } - - collectionHelper.drop(); - if (documents.size() > 0) { - collectionHelper.insertDocuments(documents); - } - } - - MongoDatabase database = mongoClient.getDatabase(databaseName); - if (gridFSBucketName != null) { - setupGridFSBuckets(database); - commandListener.reset(); - } - collection = database.getCollection(collectionName, BsonDocument.class); - helper = new JsonPoweredCrudTestHelper(description, database, collection, gridFSBucket, mongoClient); - if (definition.containsKey("failPoint")) { - collectionHelper.runAdminCommand(definition.getDocument("failPoint")); - } - } - - private ReadConcern getReadConcern(final BsonDocument clientOptions) { - if (clientOptions.containsKey("readConcernLevel")) { - return new ReadConcern(ReadConcernLevel.fromString(clientOptions.getString("readConcernLevel").getValue())); - } else { - return ReadConcern.DEFAULT; - } - } - - private WriteConcern getWriteConcern(final BsonDocument clientOptions) { - if (clientOptions.containsKey("w")) { - if (clientOptions.isNumber("w")) { - 
return new WriteConcern(clientOptions.getNumber("w").intValue()); - } else { - return new WriteConcern(clientOptions.getString("w").getValue()); - } - } else { - return WriteConcern.ACKNOWLEDGED; - } - } - - private ReadPreference getReadPreference(final BsonDocument clientOptions) { - if (clientOptions.containsKey("readPreference")) { - return ReadPreference.valueOf(clientOptions.getString("readPreference").getValue()); - } else { - return ReadPreference.primary(); - } - } - - private void setupGridFSBuckets(final MongoDatabase database) { - gridFSBucket = GridFSBuckets.create(database); - filesCollection = database.getCollection("fs.files", BsonDocument.class); - chunksCollection = database.getCollection("fs.chunks", BsonDocument.class); - - filesCollection.drop(); - chunksCollection.drop(); - - List filesDocuments = processFiles( - gridFSData.getArray("fs.files", new BsonArray()), new ArrayList<>()); - if (!filesDocuments.isEmpty()) { - filesCollection.insertMany(filesDocuments); - } - - List chunksDocuments = processChunks( - gridFSData.getArray("fs.chunks", new BsonArray()), new ArrayList<>()); - if (!chunksDocuments.isEmpty()) { - chunksCollection.insertMany(chunksDocuments); - } - } - - @After - public void cleanUp() { - if (mongoClient != null) { - mongoClient.close(); - } - if (collectionHelper != null && definition.containsKey("failPoint")) { - collectionHelper.runAdminCommand(new BsonDocument("configureFailPoint", - definition.getDocument("failPoint").getString("configureFailPoint")) - .append("mode", new BsonString("off"))); - } - } - - @Test - public void shouldPassAllOutcomes() { - executeOperations(definition.getArray("operations")); - - if (definition.containsKey("expectations")) { - List expectedEvents = getExpectedEvents(definition.getArray("expectations"), databaseName, null); - List events = commandListener.waitForStartedEvents(expectedEvents.size()); - - assertEventsEquality(expectedEvents, events); - } - - BsonDocument expectedOutcome = definition.getDocument("outcome", new BsonDocument()); - if (expectedOutcome.containsKey("collection")) { - List collectionData = collectionHelper.find(new BsonDocumentCodec()); - assertEquals(expectedOutcome.getDocument("collection").getArray("data").getValues(), collectionData); - } - } - - private void executeOperations(final BsonArray operations) { - for (BsonValue cur : operations) { - BsonDocument operation = cur.asDocument(); - BsonValue expectedResult = operation.get("result"); - - try { - BsonDocument actualOutcome = helper.getOperationResults(operation); - if (expectedResult != null) { - BsonValue actualResult = actualOutcome.get("result"); - if (actualResult.isDocument()) { - assertEquals("Expected operation result differs from actual", expectedResult, actualResult); - } - } - } catch (MongoException e) { - // if no error was expected, re-throw it - if (!operation.getBoolean("error", BsonBoolean.FALSE).getValue()) { - throw e; - } - } - } - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { - List data = new ArrayList<>(); - for (File file : JsonPoweredTestHelper.getTestFiles("/retryable-reads")) { - BsonDocument testDocument = JsonPoweredTestHelper.getTestDocument(file); - for (BsonValue test : testDocument.getArray("tests")) { - data.add(new Object[]{file.getName(), test.asDocument().getString("description").getValue(), - testDocument.getString("database_name", new BsonString(getDefaultDatabaseName())).getValue(), - 
testDocument.getString("collection_name", - new BsonString(file.getName().substring(0, file.getName().lastIndexOf(".")))).getValue(), - (testDocument.containsKey("bucket_name") ? new BsonArray(singletonList(testDocument.getDocument("data"))) - : testDocument.getArray("data")), - testDocument.getString("bucket_name", null), test.asDocument(), skipTest(testDocument, test.asDocument())}); - } - } - return data; - } - - private List processFiles(final BsonArray bsonArray, final List documents) { - for (BsonValue rawDocument : bsonArray.getValues()) { - if (rawDocument.isDocument()) { - BsonDocument document = rawDocument.asDocument(); - if (document.get("length").isInt32()) { - document.put("length", new BsonInt64(document.getInt32("length").getValue())); - } - if (document.containsKey("metadata") && document.getDocument("metadata").isEmpty()) { - document.remove("metadata"); - } - if (document.containsKey("aliases") && document.getArray("aliases").getValues().size() == 0) { - document.remove("aliases"); - } - if (document.containsKey("contentType") && document.getString("contentType").getValue().length() == 0) { - document.remove("contentType"); - } - documents.add(document); - } - } - return documents; - } - - private List processChunks(final BsonArray bsonArray, final List documents) { - for (BsonValue rawDocument: bsonArray.getValues()) { - if (rawDocument.isDocument()) { - documents.add(parseHexDocument(rawDocument.asDocument())); - } - } - return documents; - } - - private BsonDocument parseHexDocument(final BsonDocument document) { - return parseHexDocument(document, "data"); - } - - private BsonDocument parseHexDocument(final BsonDocument document, final String hexDocument) { - if (document.containsKey(hexDocument) && document.get(hexDocument).isDocument()) { - byte[] bytes = Hex.decode(document.getDocument(hexDocument).getString("$hex").getValue()); - document.put(hexDocument, new BsonBinary(bytes)); - } - return document; - } -} diff --git a/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsTest.java b/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsTest.java deleted file mode 100644 index d2ed3b4ab09..00000000000 --- a/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsTest.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.client; - -import com.mongodb.MongoClientSettings; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.bson.BsonString; - -public class RetryableReadsTest extends AbstractRetryableReadsTest { - public RetryableReadsTest(final String filename, final String description, final String databaseName, final String collectionName, - final BsonArray data, final BsonString bucketName, final BsonDocument definition, final boolean skipTest) { - super(filename, description, databaseName, collectionName, data, bucketName, definition, skipTest); - } - - @Override - protected MongoClient createMongoClient(final MongoClientSettings settings) { - return MongoClients.create(settings); - } -} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java index c60d9011d33..4d50fd54577 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java @@ -24,12 +24,24 @@ import java.net.URISyntaxException; import java.util.Collection; +import static org.junit.Assume.assumeFalse; + public class UnifiedRetryableReadsTest extends UnifiedSyncTest { - public UnifiedRetryableReadsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { + public UnifiedRetryableReadsTest(final String fileDescription, final String testDescription, final String schemaVersion, + final BsonArray runOnRequirements, final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); + customSkips(fileDescription, testDescription); + } + + public static void customSkips(final String fileDescription, @SuppressWarnings("unused") final String testDescription) { + // Skipped because driver removed the deprecated count methods + assumeFalse(fileDescription.equals("count")); + assumeFalse(fileDescription.equals("count-serverErrors")); + // Skipped because the driver never had these methods + assumeFalse(fileDescription.equals("listDatabaseObjects")); + assumeFalse(fileDescription.equals("listDatabaseObjects-serverErrors")); + assumeFalse(fileDescription.equals("listCollectionObjects")); + assumeFalse(fileDescription.equals("listCollectionObjects-serverErrors")); } @Parameterized.Parameters(name = "{0}: {1}") diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java index 62eac081d4e..46e47757ff6 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java @@ -109,6 +109,7 @@ public abstract class UnifiedTest { private final UnifiedClientEncryptionHelper clientEncryptionHelper = new UnifiedClientEncryptionHelper(entities); private final List failPoints = new ArrayList<>(); private final UnifiedTestContext rootContext = new UnifiedTestContext(); + private boolean ignoreExtraEvents; private BsonDocument startingClusterTime; private class UnifiedTestContext { @@ -151,6 +152,10 @@ public 
UnifiedTest(@Nullable final String fileDescription, final String schemaVe crudHelper = new UnifiedCrudHelper(entities, definition.getString("description").getValue()); } + protected void ignoreExtraEvents() { + ignoreExtraEvents = true; + } + public Entities getEntities() { return entities; } @@ -279,7 +284,8 @@ private void compareEvents(final UnifiedTestContext context, final BsonDocument for (BsonValue cur : definition.getArray("expectEvents")) { BsonDocument curClientEvents = cur.asDocument(); String client = curClientEvents.getString("client").getValue(); - boolean ignoreExtraEvents = curClientEvents.getBoolean("ignoreExtraEvents", BsonBoolean.FALSE).getValue(); + boolean ignoreExtraEvents = + curClientEvents.getBoolean("ignoreExtraEvents", BsonBoolean.valueOf(this.ignoreExtraEvents)).getValue(); String eventType = curClientEvents.getString("eventType", new BsonString("command")).getValue(); BsonArray expectedEvents = curClientEvents.getArray("events"); if (eventType.equals("command")) { From b42d76be3a50510fbae87f8b0842fdea6ddce067 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Fri, 17 May 2024 10:57:56 -0600 Subject: [PATCH 12/90] Remove outdated entries from THIRD-PARTY-NOTICES (#1393) --- THIRD-PARTY-NOTICES | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 971643143b8..200d5d3803a 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -21,7 +21,7 @@ https://github.com/mongodb/mongo-java-driver. Any republication or derived work distributed in source code form must include this copyright and license notice. -2) The following files: Assertions.java, AbstractCopyOnWriteMap.java, CopyOnWriteMap.java +2) The following files: Assertions.java Copyright (c) 2008-2014 Atlassian Pty Ltd @@ -37,7 +37,7 @@ https://github.com/mongodb/mongo-java-driver. See the License for the specific language governing permissions and limitations under the License. -3) The following files: Beta.java, UnsignedLongs.java, UnsignedLongsTest.java +3) The following files: Beta.java Copyright 2010 The Guava Authors Copyright 2011 The Guava Authors @@ -54,24 +54,7 @@ https://github.com/mongodb/mongo-java-driver. See the License for the specific language governing permissions and limitations under the License. -4) The following files: ReadTimeoutHandler.java - - Copyright 2008-present MongoDB, Inc. - Copyright 2012 The Netty Project - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -5) The following files: InstantCodec.java, Jsr310CodecProvider.java, LocalDateCodec.java, LocalDateTimeCodec.java, LocalTimeCodec.java +4) The following files: InstantCodec.java, Jsr310CodecProvider.java, LocalDateCodec.java, LocalDateTimeCodec.java, LocalTimeCodec.java Copyright 2008-present MongoDB, Inc. Copyright 2018 Cezary Bartosiak @@ -88,7 +71,7 @@ https://github.com/mongodb/mongo-java-driver. See the License for the specific language governing permissions and limitations under the License. 
-6) The following files: SaslPrep.java +5) The following files: SaslPrep.java Copyright 2008-present MongoDB, Inc. Copyright 2017 Tom Bentley @@ -105,7 +88,7 @@ https://github.com/mongodb/mongo-java-driver. See the License for the specific language governing permissions and limitations under the License. -7) The following files (originally from https://github.com/marianobarrios/tls-channel): +6) The following files (originally from https://github.com/marianobarrios/tls-channel): AsynchronousTlsChannel.java AsynchronousTlsChannelGroup.java @@ -155,7 +138,7 @@ https://github.com/mongodb/mongo-java-driver. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -8) The following files (originally from https://github.com/google/guava): +7) The following files (originally from https://github.com/google/guava): InetAddressUtils.java (formerly InetAddresses.java) InetAddressUtilsTest.java (formerly InetAddressesTest.java) From 84247d3be14f7f78be49e4596eae18c8e94c3f0a Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Fri, 17 May 2024 11:53:50 -0600 Subject: [PATCH 13/90] Improve `SecureRandom` usage in `ObjectId` (#1394) --- bson/src/main/org/bson/types/ObjectId.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bson/src/main/org/bson/types/ObjectId.java b/bson/src/main/org/bson/types/ObjectId.java index 57c1d8c3738..7c1b1d29540 100644 --- a/bson/src/main/org/bson/types/ObjectId.java +++ b/bson/src/main/org/bson/types/ObjectId.java @@ -57,7 +57,7 @@ public final class ObjectId implements Comparable, Serializable { private static final int RANDOM_VALUE1; private static final short RANDOM_VALUE2; - private static final AtomicInteger NEXT_COUNTER = new AtomicInteger(new SecureRandom().nextInt()); + private static final AtomicInteger NEXT_COUNTER; private static final char[] HEX_CHARS = { '0', '1', '2', '3', '4', '5', '6', '7', @@ -409,6 +409,7 @@ private Object readResolve() { SecureRandom secureRandom = new SecureRandom(); RANDOM_VALUE1 = secureRandom.nextInt(0x01000000); RANDOM_VALUE2 = (short) secureRandom.nextInt(0x00008000); + NEXT_COUNTER = new AtomicInteger(secureRandom.nextInt()); } catch (Exception e) { throw new RuntimeException(e); } From c161afc993c2f0e7ca39e57ce8753ad1378fa13d Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Tue, 21 May 2024 12:52:00 -0600 Subject: [PATCH 14/90] Augment `config/spotbugs/exclude.xml` with finding status and rank (#1392) JAVA-5431 --- config/spotbugs/exclude.xml | 159 +++++++++++------------------------- 1 file changed, 48 insertions(+), 111 deletions(-) diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml index d35f0a81c8a..fb0e4e9ec0e 100644 --- a/config/spotbugs/exclude.xml +++ b/config/spotbugs/exclude.xml @@ -14,214 +14,150 @@ ~ limitations under the License. --> + - - - - - + + + + + - + + - - - + + - + + - + + - + + - - - - - - - - - - - - - - - - - - - - - - - - - + - + - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - - - - - + - - - - - - - - - - - + + + - + + - + + + - + + - + + @@ -229,36 +165,35 @@ + + - + + - - - - - + - + @@ -268,11 +203,13 @@ see: https://github.com/Kotlin/kotlinx.coroutines/issues/3099 --> + + From 790185f9ed64fed4e5720e2bbbff170d247a7374 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Thu, 23 May 2024 09:47:08 -0400 Subject: [PATCH 15/90] Upgrade to logback-classic 1.3.14 (#1399) Note that logback-classic is only used for testing. 
JAVA-5482 --- build.gradle | 5 +++-- driver-benchmarks/build.gradle | 2 +- driver-workload-executor/build.gradle | 2 +- graalvm-native-image-app/build.gradle | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/build.gradle b/build.gradle index aa0dd05ed38..762ca55be34 100644 --- a/build.gradle +++ b/build.gradle @@ -58,6 +58,7 @@ ext { mongoCryptVersion = '1.8.0' projectReactorVersion = '2022.0.0' junitBomVersion = '5.8.2' + logbackVersion = '1.3.14' gitVersion = getGitVersion() } @@ -125,7 +126,7 @@ configure(scalaProjects) { testImplementation('org.scalatest:scalatest-shouldmatchers_%%:3.2.9') testImplementation('org.scalatestplus:junit-4-13_%%:3.2.9.0') testImplementation('org.scalatestplus:mockito-3-12_%%:3.2.10.0') - testImplementation('ch.qos.logback:logback-classic:1.1.3') + testImplementation("ch.qos.logback:logback-classic:$logbackVersion") testImplementation('org.reflections:reflections:0.9.10') } @@ -265,7 +266,7 @@ configure(javaCodeCheckedProjects) { testImplementation 'cglib:cglib-nodep:2.2.2' testImplementation 'org.objenesis:objenesis:1.3' testImplementation 'org.hamcrest:hamcrest-all:1.3' - testImplementation 'ch.qos.logback:logback-classic:1.1.1' + testImplementation "ch.qos.logback:logback-classic:$logbackVersion" testImplementation project(':util:spock') //Adding categories to classpath } diff --git a/driver-benchmarks/build.gradle b/driver-benchmarks/build.gradle index c843c40b56b..960674011eb 100644 --- a/driver-benchmarks/build.gradle +++ b/driver-benchmarks/build.gradle @@ -31,7 +31,7 @@ sourceSets { dependencies { api project(':driver-sync') - implementation 'ch.qos.logback:logback-classic:1.2.11' + implementation "ch.qos.logback:logback-classic:$logbackVersion" } javadoc { diff --git a/driver-workload-executor/build.gradle b/driver-workload-executor/build.gradle index ac13859c672..7c48e444dc2 100644 --- a/driver-workload-executor/build.gradle +++ b/driver-workload-executor/build.gradle @@ -43,7 +43,7 @@ dependencies { implementation project(':driver-sync') implementation project(':driver-core').sourceSets.test.output implementation project(':driver-sync').sourceSets.test.output - implementation 'ch.qos.logback:logback-classic:1.2.11' + implementation "ch.qos.logback:logback-classic:$logbackVersion" implementation(platform("org.junit:junit-bom:$junitBomVersion")) implementation('org.junit.jupiter:junit-jupiter') implementation('org.junit.vintage:junit-vintage-engine') diff --git a/graalvm-native-image-app/build.gradle b/graalvm-native-image-app/build.gradle index abb5ffda3a2..c34d8623b15 100644 --- a/graalvm-native-image-app/build.gradle +++ b/graalvm-native-image-app/build.gradle @@ -89,7 +89,7 @@ dependencies { implementation project(':driver-reactive-streams').sourceSets.test.output implementation "org.mongodb:mongodb-crypt:$mongoCryptVersion" implementation 'org.slf4j:slf4j-api:2.0.12' - implementation 'ch.qos.logback:logback-classic:1.5.3' + implementation "ch.qos.logback:logback-classic:$logbackVersion" implementation platform("io.projectreactor:reactor-bom:$projectReactorVersion") implementation 'io.projectreactor:reactor-core' } From a34cb315831d0a1f50b83a27d2dacf0576ed73a4 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Mon, 27 May 2024 17:11:32 -0400 Subject: [PATCH 16/90] Upgrade to Spotbugs 6.0.15 (#1398) * Exclude CT_CONSTRUCTOR_THROW * Exclude PA_PUBLIC_PRIMITIVE_ATTRIBUTE * Add assertion to handle NP warning JAVA-5480 --- build.gradle | 2 +- config/spotbugs/exclude.xml | 21 ++++++++++++++++++- .../AbstractMultiServerCluster.java | 3 
++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/build.gradle b/build.gradle index 762ca55be34..08e6ae4a376 100644 --- a/build.gradle +++ b/build.gradle @@ -25,7 +25,7 @@ buildscript { } dependencies { classpath 'com.netflix.nebula:gradle-extra-configurations-plugin:7.0.0' - classpath "com.github.spotbugs.snom:spotbugs-gradle-plugin:5.0.13" + classpath "com.github.spotbugs.snom:spotbugs-gradle-plugin:6.0.15" classpath 'biz.aQute.bnd:biz.aQute.bnd.gradle:5.1.2' // Scala plugins diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml index fb0e4e9ec0e..1ef5de78bf5 100644 --- a/config/spotbugs/exclude.xml +++ b/config/spotbugs/exclude.xml @@ -14,10 +14,29 @@ ~ limitations under the License. --> - + + + + + + + + + + + diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java index 5b6fa8f56fe..272442a190e 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java @@ -38,6 +38,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; import static com.mongodb.connection.ClusterType.UNKNOWN; @@ -234,7 +235,7 @@ private boolean handleReplicaSetMemberChanged(final ServerDescription newDescrip } if (replicaSetName == null) { - replicaSetName = newDescription.getSetName(); + replicaSetName = assertNotNull(newDescription.getSetName()); } if (!replicaSetName.equals(newDescription.getSetName())) { From 01ba99df53d0e7ccd3fb17ba971d99821c0fa2e5 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Tue, 28 May 2024 13:12:43 -0600 Subject: [PATCH 17/90] Add `ssdlc-report.sh` that uses SpotBugs to create SARIF files (#1401) In the future this script may do more work for us. JAVA-5431 --- .evergreen/ssdlc-report.sh | 15 +++++++++++++++ build.gradle | 5 ++++- 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100755 .evergreen/ssdlc-report.sh diff --git a/.evergreen/ssdlc-report.sh b/.evergreen/ssdlc-report.sh new file mode 100755 index 00000000000..f11a587a20a --- /dev/null +++ b/.evergreen/ssdlc-report.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -o errexit + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +source "${RELATIVE_DIR_PATH}/javaConfig.bash" + +echo "Creating SSLDC reports" +./gradlew -version +./gradlew -PssdlcReport.enabled=true --continue -x test -x integrationTest -x spotlessApply clean check scalaCheck kotlinCheck testClasses || true +echo "SpotBugs created the following SARIF files" +find . 
-path "*/spotbugs/*.sarif" diff --git a/build.gradle b/build.gradle index 08e6ae4a376..2ba15e3daf8 100644 --- a/build.gradle +++ b/build.gradle @@ -338,7 +338,9 @@ configure(javaCodeCheckedProjects) { } spotbugs { - excludeFilter = new File(configDir, 'spotbugs/exclude.xml') + if (!project.buildingWith('ssdlcReport.enabled')) { + excludeFilter = new File(configDir, 'spotbugs/exclude.xml') + } } codenarc { @@ -350,6 +352,7 @@ configure(javaCodeCheckedProjects) { reports { xml.enabled = project.buildingWith('xmlReports.enabled') html.enabled = !project.buildingWith('xmlReports.enabled') + sarif.enabled = project.buildingWith('ssdlcReport.enabled') } } From 2412cbd5194ce166b6c1fd79e2a1a1b694dbffc4 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Tue, 28 May 2024 13:16:35 -0600 Subject: [PATCH 18/90] Direct retries to another mongos if one is available (#1367) JAVA-4254 --------- Co-authored-by: Maxim Katcharov --- .../mongodb/connection/ClusterSettings.java | 13 +- .../internal/async/function/RetryState.java | 62 ++++---- .../RetryingAsyncCallbackSupplier.java | 35 +++-- .../async/function/RetryingSyncSupplier.java | 12 +- .../AbstractMultiServerCluster.java | 15 +- .../internal/connection/BaseCluster.java | 127 ++++++++------- .../mongodb/internal/connection/Cluster.java | 21 ++- .../connection/LoadBalancedCluster.java | 5 +- .../internal/connection/OperationContext.java | 82 ++++++++++ .../connection/SingleServerCluster.java | 6 +- .../operation/AsyncOperationHelper.java | 6 +- .../operation/CommandOperationHelper.java | 18 ++- .../operation/MixedBulkWriteOperation.java | 5 +- .../operation/SyncOperationHelper.java | 6 +- .../AtMostTwoRandomServerSelector.java | 60 +++++++ .../MinimumOperationCountServerSelector.java | 62 ++++++++ ...tractServerDiscoveryAndMonitoringTest.java | 5 +- .../BaseClusterSpecification.groovy | 10 +- .../internal/connection/BaseClusterTest.java | 61 ++++++++ .../DefaultServerSpecification.groovy | 7 +- .../MultiServerClusterSpecification.groovy | 4 +- .../ServerDeprioritizationTest.java | 124 +++++++++++++++ .../ServerDiscoveryAndMonitoringTest.java | 2 +- ...erverSelectionWithinLatencyWindowTest.java | 16 +- .../SingleServerClusterSpecification.groovy | 6 +- .../AtMostTwoRandomServerSelectorTest.java | 99 ++++++++++++ ...nimumOperationCountServerSelectorTest.java | 136 ++++++++++++++++ .../client/RetryableReadsProseTest.java | 38 ++++- .../client/RetryableWritesProseTest.java | 26 ++- .../client/RetryableReadsProseTest.java | 35 ++++- .../client/RetryableWritesProseTest.java | 148 +++++++++++++++++- 31 files changed, 1086 insertions(+), 166 deletions(-) create mode 100644 driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java create mode 100644 driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java create mode 100644 driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java create mode 100644 driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java create mode 100644 driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java create mode 100644 driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java diff --git a/driver-core/src/main/com/mongodb/connection/ClusterSettings.java b/driver-core/src/main/com/mongodb/connection/ClusterSettings.java index 84a24bbd22b..0af168725cd 100644 --- a/driver-core/src/main/com/mongodb/connection/ClusterSettings.java +++ 
b/driver-core/src/main/com/mongodb/connection/ClusterSettings.java @@ -468,16 +468,18 @@ public String getRequiredReplicaSetName() { * *

<p>The server selector augments the normal server selection rules applied by the driver when determining * which server to send an operation to. At the point that it's called by the driver, the - * {@link com.mongodb.connection.ClusterDescription} which is passed to it contains a list of - * {@link com.mongodb.connection.ServerDescription} instances which satisfy either the configured {@link com.mongodb.ReadPreference} - * for any read operation or ones that can take writes (e.g. a standalone, mongos, or replica set primary). + * {@link ClusterDescription} which is passed to it {@linkplain ClusterDescription#getServerDescriptions() contains} a list of + * {@link ServerDescription} instances which satisfy either the configured {@link com.mongodb.ReadPreference} + * for any read operation or ones that can take writes (e.g. a standalone, mongos, or replica set primary), + * barring those corresponding to servers that the driver considers unavailable or potentially problematic. * </p> * <p>The server selector can then filter the {@code ServerDescription} list using whatever criteria that is required by the * application.</p> - * <p>After this selector executes, two additional selectors are applied by the driver:</p> + * <p>After this selector executes, three additional selectors are applied by the driver:</p> * <ul> * <li>select from within the latency window</li> - * <li>select a random server from those remaining</li> + * <li>select at most two random servers from those remaining</li> + * <li>select the one with fewer outstanding concurrent operations</li> * </ul> * <p>To skip the latency window selector, an application can:</p> * <ul> @@ -486,6 +488,7 @@ public String getRequiredReplicaSetName() { * </ul>
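The selection pipeline described in the Javadoc above can be illustrated with a minimal sketch, not part of this patch, of a custom ServerSelector registered through the public settings API; the class name, the port-based filter, and the trailing listDatabaseNames() call are made-up examples, while the ServerSelector, ClusterSettings.Builder#serverSelector, and MongoClientSettings APIs are the public ones referenced in this hunk.

import com.mongodb.MongoClientSettings;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.connection.ServerDescription;
import com.mongodb.selector.ServerSelector;

import java.util.List;
import java.util.stream.Collectors;

public final class CustomServerSelectorExample {
    public static void main(final String[] args) {
        // Keep only servers listening on the default port (an arbitrary example criterion);
        // the driver then applies the latency-window, at-most-two-random, and
        // fewest-outstanding-operations selectors described in the Javadoc above.
        ServerSelector preferDefaultPort = clusterDescription -> {
            List<ServerDescription> preferred = clusterDescription.getServerDescriptions().stream()
                    .filter(server -> server.getAddress().getPort() == 27017)
                    .collect(Collectors.toList());
            return preferred.isEmpty() ? clusterDescription.getServerDescriptions() : preferred;
        };

        MongoClientSettings settings = MongoClientSettings.builder()
                .applyToClusterSettings(builder -> builder.serverSelector(preferDefaultPort))
                .build();
        try (MongoClient client = MongoClients.create(settings)) {
            client.listDatabaseNames().first();
        }
    }
}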
* * @return the server selector, which may be null + * @see Builder#serverSelector(ServerSelector) */ @Nullable public ServerSelector getServerSelector() { diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java index ba4da185d79..89329f16a24 100644 --- a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java @@ -78,24 +78,25 @@ public RetryState() { * which is usually synchronous code. * * @param attemptException The exception produced by the most recent attempt. - * It is passed to the {@code retryPredicate} and to the {@code exceptionTransformer}. - * @param exceptionTransformer A function that chooses which exception to preserve as a prospective failed result of the associated - * retryable activity and may also transform or mutate the exceptions. - * The choice is between + * It is passed to the {@code retryPredicate} and to the {@code onAttemptFailureOperator}. + * @param onAttemptFailureOperator The action that is called once per failed attempt before (in the happens-before order) the + * {@code retryPredicate}, regardless of whether the {@code retryPredicate} is called. + * This action is allowed to have side effects. + *

+ * It also has to choose which exception to preserve as a prospective failed result of the associated retryable activity. + * The {@code onAttemptFailureOperator} may mutate its arguments, choose from the arguments, or return a different exception, + * but it must return a {@code @}{@link NonNull} value. + * The choice is between</p> * <ul> *     <li>the previously chosen exception or {@code null} if none has been chosen - *     (the first argument of the {@code exceptionTransformer})</li> - *     <li>and the exception from the most recent attempt (the second argument of the {@code exceptionTransformer}).</li> + *     (the first argument of the {@code onAttemptFailureOperator})</li> + *     <li>and the exception from the most recent attempt (the second argument of the {@code onAttemptFailureOperator}).</li> * </ul> - * The {@code exceptionTransformer} may either choose from its arguments, or return a different exception, a.k.a. transform, - * but it must return a {@code @}{@link NonNull} value. - * The {@code exceptionTransformer} is called once before (in the happens-before order) the {@code retryPredicate}, - * regardless of whether the {@code retryPredicate} is called. The result of the {@code exceptionTransformer} does not affect - * what exception is passed to the {@code retryPredicate}. + * The result of the {@code onAttemptFailureOperator} does not affect the exception passed to the {@code retryPredicate}. * @param retryPredicate {@code true} iff another attempt needs to be made. The {@code retryPredicate} is called not more than once * per attempt and only if all the following is true: * <ul> - *     <li>{@code exceptionTransformer} completed normally;</li> + *     <li>{@code onAttemptFailureOperator} completed normally;</li> *     <li>the most recent attempt is not the {@linkplain #isLastAttempt() last} one.</li> * </ul> * The {@code retryPredicate} accepts this {@link RetryState} and the exception from the most recent attempt, @@ -103,7 +104,7 @@ public RetryState() { * after (in the happens-before order) testing the {@code retryPredicate}, and only if the predicate completes normally. * @throws RuntimeException Iff any of the following is true: * <ul> - *     <li>the {@code exceptionTransformer} completed abruptly;</li> + *     <li>the {@code onAttemptFailureOperator} completed abruptly;</li> *     <li>the most recent attempt is the {@linkplain #isLastAttempt() last} one;</li> *     <li>the {@code retryPredicate} completed abruptly;</li> *     <li>the {@code retryPredicate} is {@code false}.</li> * </ul>
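A minimal sketch of how these retry primitives fit together, based on the RetryingSyncSupplier constructor and get() that appear later in this series; the RetrySketch class, the MongoSocketException-based retry policy, and the supplied read operation are hypothetical, the no-argument RetryState constructor is assumed from the hunk context above, and these are internal driver APIs that may change.

import com.mongodb.MongoSocketException;
import com.mongodb.internal.async.function.RetryState;
import com.mongodb.internal.async.function.RetryingSyncSupplier;

import java.util.function.Supplier;

final class RetrySketch {
    static String readWithRetries(final Supplier<String> attemptOnce) {
        // Tracks the state shared across attempts of one retryable activity.
        RetryState retryState = new RetryState();
        Supplier<String> retryingRead = new RetryingSyncSupplier<>(
                retryState,
                // onAttemptFailureOperator: keep the most recent failure as the prospective failed result
                (previouslyChosen, mostRecent) -> mostRecent,
                // retryPredicate: retry only transient network failures (hypothetical policy)
                (state, failure) -> failure instanceof MongoSocketException,
                attemptOnce);
        return retryingRead.get();
    }
}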
  • @@ -112,10 +113,10 @@ public RetryState() { * i.e., the caller must not do any more attempts. * @see #advanceOrThrow(Throwable, BinaryOperator, BiPredicate) */ - void advanceOrThrow(final RuntimeException attemptException, final BinaryOperator exceptionTransformer, + void advanceOrThrow(final RuntimeException attemptException, final BinaryOperator onAttemptFailureOperator, final BiPredicate retryPredicate) throws RuntimeException { try { - doAdvanceOrThrow(attemptException, exceptionTransformer, retryPredicate, true); + doAdvanceOrThrow(attemptException, onAttemptFailureOperator, retryPredicate, true); } catch (RuntimeException | Error unchecked) { throw unchecked; } catch (Throwable checked) { @@ -129,18 +130,19 @@ void advanceOrThrow(final RuntimeException attemptException, final BinaryOperato * * @see #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate) */ - void advanceOrThrow(final Throwable attemptException, final BinaryOperator exceptionTransformer, + void advanceOrThrow(final Throwable attemptException, final BinaryOperator onAttemptFailureOperator, final BiPredicate retryPredicate) throws Throwable { - doAdvanceOrThrow(attemptException, exceptionTransformer, retryPredicate, false); + doAdvanceOrThrow(attemptException, onAttemptFailureOperator, retryPredicate, false); } /** * @param onlyRuntimeExceptions {@code true} iff the method must expect {@link #exception} and {@code attemptException} to be * {@link RuntimeException}s and must not explicitly handle other {@link Throwable} types, of which only {@link Error} is possible * as {@link RetryState} does not have any source of {@link Exception}s. + * @param onAttemptFailureOperator See {@link #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate)}. */ private void doAdvanceOrThrow(final Throwable attemptException, - final BinaryOperator exceptionTransformer, + final BinaryOperator onAttemptFailureOperator, final BiPredicate retryPredicate, final boolean onlyRuntimeExceptions) throws Throwable { assertTrue(attempt() < attempts); @@ -149,7 +151,7 @@ private void doAdvanceOrThrow(final Throwable attemptException, assertTrue(isRuntime(attemptException)); } assertTrue(!isFirstAttempt() || exception == null); - Throwable newlyChosenException = transformException(exception, attemptException, onlyRuntimeExceptions, exceptionTransformer); + Throwable newlyChosenException = callOnAttemptFailureOperator(exception, attemptException, onlyRuntimeExceptions, onAttemptFailureOperator); if (isLastAttempt()) { exception = newlyChosenException; throw exception; @@ -167,27 +169,31 @@ private void doAdvanceOrThrow(final Throwable attemptException, /** * @param onlyRuntimeExceptions See {@link #doAdvanceOrThrow(Throwable, BinaryOperator, BiPredicate, boolean)}. + * @param onAttemptFailureOperator See {@link #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate)}. 
*/ - private static Throwable transformException(@Nullable final Throwable previouslyChosenException, final Throwable attemptException, - final boolean onlyRuntimeExceptions, final BinaryOperator exceptionTransformer) { + private static Throwable callOnAttemptFailureOperator( + @Nullable final Throwable previouslyChosenException, + final Throwable attemptException, + final boolean onlyRuntimeExceptions, + final BinaryOperator onAttemptFailureOperator) { if (onlyRuntimeExceptions && previouslyChosenException != null) { assertTrue(isRuntime(previouslyChosenException)); } Throwable result; try { - result = assertNotNull(exceptionTransformer.apply(previouslyChosenException, attemptException)); + result = assertNotNull(onAttemptFailureOperator.apply(previouslyChosenException, attemptException)); if (onlyRuntimeExceptions) { assertTrue(isRuntime(result)); } - } catch (Throwable exceptionTransformerException) { - if (onlyRuntimeExceptions && !isRuntime(exceptionTransformerException)) { - throw exceptionTransformerException; + } catch (Throwable onAttemptFailureOperatorException) { + if (onlyRuntimeExceptions && !isRuntime(onAttemptFailureOperatorException)) { + throw onAttemptFailureOperatorException; } if (previouslyChosenException != null) { - exceptionTransformerException.addSuppressed(previouslyChosenException); + onAttemptFailureOperatorException.addSuppressed(previouslyChosenException); } - exceptionTransformerException.addSuppressed(attemptException); - throw exceptionTransformerException; + onAttemptFailureOperatorException.addSuppressed(attemptException); + throw onAttemptFailureOperatorException; } return result; } diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java index e0f3d8c7457..16f6f2e7086 100644 --- a/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java @@ -41,31 +41,34 @@ public final class RetryingAsyncCallbackSupplier implements AsyncCallbackSupplier { private final RetryState state; private final BiPredicate retryPredicate; - private final BinaryOperator failedResultTransformer; + private final BinaryOperator onAttemptFailureOperator; private final AsyncCallbackSupplier asyncFunction; /** * @param state The {@link RetryState} to be deemed as initial for the purpose of the new {@link RetryingAsyncCallbackSupplier}. - * @param failedResultTransformer A function that chooses which failed result of the {@code asyncFunction} to preserve as a prospective - * failed result of this {@link RetryingAsyncCallbackSupplier} and may also transform or mutate the exceptions. - * The choice is between + * @param onAttemptFailureOperator The action that is called once per failed attempt before (in the happens-before order) the + * {@code retryPredicate}, regardless of whether the {@code retryPredicate} is called. + * This action is allowed to have side effects. + *

+ * It also has to choose which exception to preserve as a prospective failed result of this {@link RetryingAsyncCallbackSupplier}. + * The {@code onAttemptFailureOperator} may mutate its arguments, choose from the arguments, or return a different exception, + * but it must return a {@code @}{@link NonNull} value. + * The choice is between</p> * <ul> *     <li>the previously chosen failed result or {@code null} if none has been chosen - *     (the first argument of the {@code failedResultTransformer})</li> - *     <li>and the failed result from the most recent attempt (the second argument of the {@code failedResultTransformer}).</li> + *     (the first argument of the {@code onAttemptFailureOperator})</li> + *     <li>and the failed result from the most recent attempt (the second argument of the {@code onAttemptFailureOperator}).</li> * </ul> - * The {@code failedResultTransformer} may either choose from its arguments, or return a different exception, a.k.a. transform, - * but it must return a {@code @}{@link NonNull} value. - * If it completes abruptly, then the {@code asyncFunction} cannot be retried and the exception thrown by - * the {@code failedResultTransformer} is used as a failed result of this {@link RetryingAsyncCallbackSupplier}. - * The {@code failedResultTransformer} is called before (in the happens-before order) the {@code retryPredicate}. - * The result of the {@code failedResultTransformer} does not affect what exception is passed to the {@code retryPredicate}. + * The result of the {@code onAttemptFailureOperator} does not affect the exception passed to the {@code retryPredicate}. + * <p> + * If {@code onAttemptFailureOperator} completes abruptly, then the {@code asyncFunction} cannot be retried and the exception thrown by + * the {@code onAttemptFailureOperator} is used as a failed result of this {@link RetryingAsyncCallbackSupplier}.</p> * @param retryPredicate {@code true} iff another attempt needs to be made. If it completes abruptly, * then the {@code asyncFunction} cannot be retried and the exception thrown by the {@code retryPredicate} * is used as a failed result of this {@link RetryingAsyncCallbackSupplier}. The {@code retryPredicate} is called not more than once * per attempt and only if all the following is true: * <ul> - *     <li>{@code failedResultTransformer} completed normally;</li> + *     <li>{@code onAttemptFailureOperator} completed normally;</li> *     <li>the most recent attempt is not the {@linkplain RetryState#isLastAttempt() last} one.</li> * </ul>
    * The {@code retryPredicate} accepts this {@link RetryState} and the exception from the most recent attempt, @@ -75,12 +78,12 @@ public final class RetryingAsyncCallbackSupplier implements AsyncCallbackSupp */ public RetryingAsyncCallbackSupplier( final RetryState state, - final BinaryOperator failedResultTransformer, + final BinaryOperator onAttemptFailureOperator, final BiPredicate retryPredicate, final AsyncCallbackSupplier asyncFunction) { this.state = state; this.retryPredicate = retryPredicate; - this.failedResultTransformer = failedResultTransformer; + this.onAttemptFailureOperator = onAttemptFailureOperator; this.asyncFunction = asyncFunction; } @@ -113,7 +116,7 @@ private class RetryingCallback implements SingleResultCallback { public void onResult(@Nullable final R result, @Nullable final Throwable t) { if (t != null) { try { - state.advanceOrThrow(t, failedResultTransformer, retryPredicate); + state.advanceOrThrow(t, onAttemptFailureOperator, retryPredicate); } catch (Throwable failedResult) { wrapped.onResult(null, failedResult); return; diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java index 315197f0da9..ad3e4b2b807 100644 --- a/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java @@ -37,26 +37,26 @@ public final class RetryingSyncSupplier implements Supplier { private final RetryState state; private final BiPredicate retryPredicate; - private final BinaryOperator failedResultTransformer; + private final BinaryOperator onAttemptFailureOperator; private final Supplier syncFunction; /** * See {@link RetryingAsyncCallbackSupplier#RetryingAsyncCallbackSupplier(RetryState, BinaryOperator, BiPredicate, AsyncCallbackSupplier)} * for the documentation of the parameters. * - * @param failedResultTransformer Even though the {@code failedResultTransformer} accepts {@link Throwable}, + * @param onAttemptFailureOperator Even though the {@code onAttemptFailureOperator} accepts {@link Throwable}, * only {@link RuntimeException}s are passed to it. * @param retryPredicate Even though the {@code retryPredicate} accepts {@link Throwable}, * only {@link RuntimeException}s are passed to it. 
*/ public RetryingSyncSupplier( final RetryState state, - final BinaryOperator failedResultTransformer, + final BinaryOperator onAttemptFailureOperator, final BiPredicate retryPredicate, final Supplier syncFunction) { this.state = state; this.retryPredicate = retryPredicate; - this.failedResultTransformer = failedResultTransformer; + this.onAttemptFailureOperator = onAttemptFailureOperator; this.syncFunction = syncFunction; } @@ -66,10 +66,10 @@ public R get() { try { return syncFunction.get(); } catch (RuntimeException attemptException) { - state.advanceOrThrow(attemptException, failedResultTransformer, retryPredicate); + state.advanceOrThrow(attemptException, onAttemptFailureOperator, retryPredicate); } catch (Exception attemptException) { // wrap potential sneaky / Kotlin exceptions - state.advanceOrThrow(new RuntimeException(attemptException), failedResultTransformer, retryPredicate); + state.advanceOrThrow(new RuntimeException(attemptException), onAttemptFailureOperator, retryPredicate); } } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java index 272442a190e..e1d7d6946cb 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java @@ -31,9 +31,11 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -123,14 +125,13 @@ public void close() { } @Override - public ClusterableServer getServer(final ServerAddress serverAddress) { + public ServersSnapshot getServersSnapshot() { isTrue("is open", !isClosed()); - - ServerTuple serverTuple = addressToServerTupleMap.get(serverAddress); - if (serverTuple == null) { - return null; - } - return serverTuple.server; + Map nonAtomicSnapshot = new HashMap<>(addressToServerTupleMap); + return serverAddress -> { + ServerTuple serverTuple = nonAtomicSnapshot.get(serverAddress); + return serverTuple == null ? 
null : serverTuple.server; + }; } void onChange(final Collection newHosts) { diff --git a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java index 71526534c88..292822244b7 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java @@ -33,28 +33,31 @@ import com.mongodb.event.ClusterOpeningEvent; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.OperationContext.ServerDeprioritization; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.logging.LogMessage; import com.mongodb.internal.logging.LogMessage.Entry; import com.mongodb.internal.logging.StructuredLogger; +import com.mongodb.internal.selector.AtMostTwoRandomServerSelector; import com.mongodb.internal.selector.LatencyMinimizingServerSelector; +import com.mongodb.internal.selector.MinimumOperationCountServerSelector; import com.mongodb.lang.Nullable; import com.mongodb.selector.CompositeServerSelector; import com.mongodb.selector.ServerSelector; -import java.util.ArrayList; import java.util.Collections; import java.util.Deque; import java.util.Iterator; import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Function; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.connection.ServerDescription.MAX_DRIVER_WIRE_VERSION; @@ -78,9 +81,9 @@ import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static java.lang.String.format; import static java.util.Arrays.asList; -import static java.util.Comparator.comparingInt; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.stream.Collectors.toList; abstract class BaseCluster implements Cluster { private static final Logger LOGGER = Loggers.getLogger("cluster"); @@ -122,8 +125,8 @@ public ServerTuple selectServer(final ServerSelector serverSelector, final Opera CountDownLatch currentPhase = phase.get(); ClusterDescription curDescription = description; logServerSelectionStarted(clusterId, operationContext, serverSelector, curDescription); - ServerSelector compositeServerSelector = getCompositeServerSelector(serverSelector); - ServerTuple serverTuple = selectServer(compositeServerSelector, curDescription); + ServerDeprioritization serverDeprioritization = operationContext.getServerDeprioritization(); + ServerTuple serverTuple = createCompleteSelectorAndSelectServer(serverSelector, curDescription, serverDeprioritization); boolean selectionWaitingLogged = false; @@ -137,8 +140,10 @@ public ServerTuple selectServer(final ServerSelector serverSelector, final Opera } if (serverTuple != null) { + ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); logServerSelectionSucceeded( - clusterId, operationContext, 
serverTuple.getServerDescription().getAddress(), serverSelector, curDescription); + clusterId, operationContext, serverAddress, serverSelector, curDescription); + serverDeprioritization.updateCandidate(serverAddress); return serverTuple; } @@ -163,7 +168,7 @@ public ServerTuple selectServer(final ServerSelector serverSelector, final Opera currentPhase = phase.get(); curDescription = description; - serverTuple = selectServer(compositeServerSelector, curDescription); + serverTuple = createCompleteSelectorAndSelectServer(serverSelector, curDescription, serverDeprioritization); } } catch (InterruptedException e) { @@ -180,8 +185,7 @@ public void selectServerAsync(final ServerSelector serverSelector, final Operati ClusterDescription currentDescription = description; logServerSelectionStarted(clusterId, operationContext, serverSelector, currentDescription); - ServerSelectionRequest request = new ServerSelectionRequest(operationContext, serverSelector, getCompositeServerSelector(serverSelector), - getMaxWaitTimeNanos(), callback); + ServerSelectionRequest request = new ServerSelectionRequest(operationContext, serverSelector, getMaxWaitTimeNanos(), callback); if (!handleServerSelectionRequest(request, currentPhase, currentDescription)) { notifyWaitQueueHandler(request); @@ -276,10 +280,13 @@ private boolean handleServerSelectionRequest(final ServerSelectionRequest reques return true; } - ServerTuple serverTuple = selectServer(request.compositeSelector, description); + ServerDeprioritization serverDeprioritization = request.operationContext.getServerDeprioritization(); + ServerTuple serverTuple = createCompleteSelectorAndSelectServer(request.originalSelector, description, serverDeprioritization); if (serverTuple != null) { - logServerSelectionSucceeded(clusterId, request.operationContext, serverTuple.getServerDescription().getAddress(), + ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); + logServerSelectionSucceeded(clusterId, request.operationContext, serverAddress, request.originalSelector, description); + serverDeprioritization.updateCandidate(serverAddress); request.onResult(serverTuple, null); return true; } @@ -302,55 +309,63 @@ private boolean handleServerSelectionRequest(final ServerSelectionRequest reques } @Nullable - private ServerTuple selectServer(final ServerSelector serverSelector, - final ClusterDescription clusterDescription) { - return selectServer(serverSelector, clusterDescription, this::getServer); + private ServerTuple createCompleteSelectorAndSelectServer( + final ServerSelector serverSelector, + final ClusterDescription clusterDescription, + final ServerDeprioritization serverDeprioritization) { + return createCompleteSelectorAndSelectServer( + serverSelector, clusterDescription, getServersSnapshot(), serverDeprioritization, settings); } @Nullable @VisibleForTesting(otherwise = PRIVATE) - static ServerTuple selectServer(final ServerSelector serverSelector, final ClusterDescription clusterDescription, - final Function serverCatalog) { - return atMostNRandom(new ArrayList<>(serverSelector.select(clusterDescription)), 2, serverDescription -> { - Server server = serverCatalog.apply(serverDescription.getAddress()); - return server == null ? 
null : new ServerTuple(server, serverDescription); - }).stream() - .min(comparingInt(serverTuple -> serverTuple.getServer().operationCount())) + static ServerTuple createCompleteSelectorAndSelectServer( + final ServerSelector serverSelector, + final ClusterDescription clusterDescription, + final ServersSnapshot serversSnapshot, + final ServerDeprioritization serverDeprioritization, + final ClusterSettings settings) { + ServerSelector completeServerSelector = getCompleteServerSelector(serverSelector, serverDeprioritization, serversSnapshot, settings); + return completeServerSelector.select(clusterDescription) + .stream() + .map(serverDescription -> new ServerTuple( + assertNotNull(serversSnapshot.getServer(serverDescription.getAddress())), + serverDescription)) + .findAny() .orElse(null); } - /** - * Returns a new {@link List} of at most {@code n} elements, where each element is a result of - * {@linkplain Function#apply(Object) applying} the {@code transformer} to a randomly picked element from the specified {@code list}, - * such that no element is picked more than once. If the {@code transformer} produces {@code null}, then another element is picked - * until either {@code n} transformed non-{@code null} elements are collected, or the {@code list} does not have - * unpicked elements left. - *
<p>
    - * Note that this method may reorder the {@code list}, as it uses the - * Fisher–Yates, a.k.a. Durstenfeld, shuffle algorithm. - */ - private static List atMostNRandom(final ArrayList list, final int n, - final Function transformer) { - ThreadLocalRandom random = ThreadLocalRandom.current(); - List result = new ArrayList<>(n); - for (int i = list.size() - 1; i >= 0 && result.size() < n; i--) { - Collections.swap(list, i, random.nextInt(i + 1)); - ServerTuple serverTuple = transformer.apply(list.get(i)); - if (serverTuple != null) { - result.add(serverTuple); - } - } - return result; - } - - private ServerSelector getCompositeServerSelector(final ServerSelector serverSelector) { - ServerSelector latencyMinimizingServerSelector = - new LatencyMinimizingServerSelector(settings.getLocalThreshold(MILLISECONDS), MILLISECONDS); - if (settings.getServerSelector() == null) { - return new CompositeServerSelector(asList(serverSelector, latencyMinimizingServerSelector)); - } else { - return new CompositeServerSelector(asList(serverSelector, settings.getServerSelector(), latencyMinimizingServerSelector)); - } + private static ServerSelector getCompleteServerSelector( + final ServerSelector serverSelector, + final ServerDeprioritization serverDeprioritization, + final ServersSnapshot serversSnapshot, + final ClusterSettings settings) { + List selectors = Stream.of( + getRaceConditionPreFilteringSelector(serversSnapshot), + serverSelector, + serverDeprioritization.getServerSelector(), + settings.getServerSelector(), // may be null + new LatencyMinimizingServerSelector(settings.getLocalThreshold(MILLISECONDS), MILLISECONDS), + AtMostTwoRandomServerSelector.instance(), + new MinimumOperationCountServerSelector(serversSnapshot) + ).filter(Objects::nonNull).collect(toList()); + return new CompositeServerSelector(selectors); + } + + private static ServerSelector getRaceConditionPreFilteringSelector(final ServersSnapshot serversSnapshot) { + // The set of `Server`s maintained by the `Cluster` is updated concurrently with `clusterDescription` being read. + // Additionally, that set of servers continues to be concurrently updated while `serverSelector` selects. + // This race condition means that we are not guaranteed to observe all the servers from `clusterDescription` + // among the `Server`s maintained by the `Cluster`. + // To deal with this race condition, we take `serversSnapshot` of that set of `Server`s + // (the snapshot itself does not have to be atomic) non-atomically with reading `clusterDescription` + // (this means, `serversSnapshot` and `clusterDescription` are not guaranteed to be consistent with each other), + // and do pre-filtering to make sure that the only `ServerDescription`s we may select, + // are of those `Server`s that are known to both `clusterDescription` and `serversSnapshot`. 
+ return clusterDescription -> clusterDescription.getServerDescriptions() + .stream() + .filter(serverDescription -> serversSnapshot.containsServer(serverDescription.getAddress())) + .collect(toList()); } protected ClusterableServer createServer(final ServerAddress serverAddress) { @@ -399,7 +414,6 @@ private MongoException createAndLogTimeoutException( private static final class ServerSelectionRequest { private final OperationContext operationContext; private final ServerSelector originalSelector; - private final ServerSelector compositeSelector; @Nullable private final Long maxWaitTimeNanos; private final SingleResultCallback callback; @@ -407,13 +421,12 @@ private static final class ServerSelectionRequest { private CountDownLatch phase; ServerSelectionRequest(final OperationContext operationContext, - final ServerSelector serverSelector, final ServerSelector compositeSelector, + final ServerSelector serverSelector, @Nullable final Long maxWaitTimeNanos, final SingleResultCallback callback) { this.operationContext = operationContext; this.originalSelector = serverSelector; - this.compositeSelector = compositeSelector; this.maxWaitTimeNanos = maxWaitTimeNanos; this.callback = callback; } diff --git a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java index a3a649b10a6..358eb90a175 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java @@ -18,9 +18,9 @@ import com.mongodb.ServerAddress; +import com.mongodb.annotations.ThreadSafe; import com.mongodb.connection.ClusterId; import com.mongodb.event.ServerDescriptionChangedEvent; -import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ClusterSettings; @@ -29,8 +29,6 @@ import java.io.Closeable; -import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; - /** * Represents a cluster of MongoDB servers. Implementations can define the behaviour depending upon the type of cluster. * @@ -43,9 +41,7 @@ public interface Cluster extends Closeable { ClusterId getClusterId(); - @Nullable - @VisibleForTesting(otherwise = PRIVATE) - ClusterableServer getServer(ServerAddress serverAddress); + ServersSnapshot getServersSnapshot(); /** * Get the current description of this cluster. @@ -89,4 +85,17 @@ void selectServerAsync(ServerSelector serverSelector, OperationContext operation * Server Discovery And Monitoring specification. */ void onChange(ServerDescriptionChangedEvent event); + + /** + * A non-atomic snapshot of the servers in a {@link Cluster}. 
+ */ + @ThreadSafe + interface ServersSnapshot { + @Nullable + Server getServer(ServerAddress serverAddress); + + default boolean containsServer(final ServerAddress serverAddress) { + return getServer(serverAddress) != null; + } + } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java index dff239ab204..efc6c4bfb47 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java @@ -181,10 +181,11 @@ public ClusterId getClusterId() { } @Override - public ClusterableServer getServer(final ServerAddress serverAddress) { + public ServersSnapshot getServersSnapshot() { isTrue("open", !isClosed()); waitForSrv(); - return assertNotNull(server); + ClusterableServer server = assertNotNull(this.server); + return serverAddress -> server; } @Override diff --git a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java index 657aaccfab9..683f6adfbf8 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java +++ b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java @@ -15,7 +15,19 @@ */ package com.mongodb.internal.connection; +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; +import static java.util.stream.Collectors.toList; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
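The OperationContext changes below introduce ServerDeprioritization. A condensed, illustrative sketch of its lifecycle across retry attempts on a sharded cluster follows; it is not taken from this patch, its inputs are placeholders supplied by the caller, and it sits in the com.mongodb.internal.connection package because updateCandidate and getServerSelector are package-private.

package com.mongodb.internal.connection;

import com.mongodb.connection.ClusterDescription;
import com.mongodb.connection.ServerDescription;

import java.util.List;

final class ServerDeprioritizationSketch {
    // Each failed attempt deprioritizes the server the attempt ran on, so the next attempt
    // prefers a different mongos when one is available.
    static List<ServerDescription> candidatesForNextAttempt(
            final ClusterDescription shardedClusterDescription,
            final ServerDescription previouslySelected,
            final Throwable attemptFailure) {
        OperationContext.ServerDeprioritization deprioritization =
                new OperationContext().getServerDeprioritization();
        deprioritization.updateCandidate(previouslySelected.getAddress()); // recorded when selection succeeded
        deprioritization.onAttemptFailure(attemptFailure);                 // the attempt on that server failed
        // Non-deprioritized servers are preferred; if none remain, all servers are returned.
        return deprioritization.getServerSelector().select(shardedClusterDescription);
    }
}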

    @@ -23,12 +35,82 @@ public class OperationContext { private static final AtomicLong NEXT_ID = new AtomicLong(0); private final long id; + private final ServerDeprioritization serverDeprioritization; public OperationContext() { id = NEXT_ID.incrementAndGet(); + serverDeprioritization = new ServerDeprioritization(); } public long getId() { return id; } + + /** + * @return The same {@link ServerDeprioritization} if called on the same {@link OperationContext}. + */ + public ServerDeprioritization getServerDeprioritization() { + return serverDeprioritization; + } + + public static final class ServerDeprioritization { + @Nullable + private ServerAddress candidate; + private final Set deprioritized; + private final DeprioritizingSelector selector; + + private ServerDeprioritization() { + candidate = null; + deprioritized = new HashSet<>(); + selector = new DeprioritizingSelector(); + } + + /** + * The returned {@link ServerSelector} tries to {@linkplain ServerSelector#select(ClusterDescription) select} + * only the {@link ServerDescription}s that do not have deprioritized {@link ServerAddress}es. + * If no such {@link ServerDescription} can be selected, then it selects {@link ClusterDescription#getServerDescriptions()}. + */ + ServerSelector getServerSelector() { + return selector; + } + + void updateCandidate(final ServerAddress serverAddress) { + candidate = serverAddress; + } + + public void onAttemptFailure(final Throwable failure) { + if (candidate == null || failure instanceof MongoConnectionPoolClearedException) { + candidate = null; + return; + } + deprioritized.add(candidate); + } + + /** + * {@link ServerSelector} requires thread safety, but that is only because a user may specify + * {@link com.mongodb.connection.ClusterSettings.Builder#serverSelector(ServerSelector)}, + * which indeed may be used concurrently. {@link DeprioritizingSelector} does not need to be thread-safe. + */ + private final class DeprioritizingSelector implements ServerSelector { + private DeprioritizingSelector() { + } + + @Override + public List select(final ClusterDescription clusterDescription) { + List serverDescriptions = clusterDescription.getServerDescriptions(); + if (!isEnabled(clusterDescription.getType())) { + return serverDescriptions; + } + List nonDeprioritizedServerDescriptions = serverDescriptions + .stream() + .filter(serverDescription -> !deprioritized.contains(serverDescription.getAddress())) + .collect(toList()); + return nonDeprioritizedServerDescriptions.isEmpty() ? 
serverDescriptions : nonDeprioritizedServerDescriptions; + } + + private boolean isEnabled(final ClusterType clusterType) { + return clusterType == ClusterType.SHARDED; + } + } + } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java index ce76522ac1d..3c9d3b126bf 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java @@ -17,7 +17,6 @@ package com.mongodb.internal.connection; import com.mongodb.MongoConfigurationException; -import com.mongodb.ServerAddress; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ClusterId; @@ -69,9 +68,10 @@ protected void connect() { } @Override - public ClusterableServer getServer(final ServerAddress serverAddress) { + public ServersSnapshot getServersSnapshot() { isTrue("open", !isClosed()); - return assertNotNull(server.get()); + ClusterableServer server = assertNotNull(this.server.get()); + return serverAddress -> server; } @Override diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java index 163521631d2..b56f624bef5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -52,6 +52,8 @@ import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; import static com.mongodb.internal.operation.CommandOperationHelper.isRetryWritesEnabled; import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableReadAttemptFailure; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; @@ -285,7 +287,7 @@ static void createReadCommandAndExecuteAsync( static AsyncCallbackSupplier decorateReadWithRetriesAsync(final RetryState retryState, final OperationContext operationContext, final AsyncCallbackSupplier asyncReadFunction) { - return new RetryingAsyncCallbackSupplier<>(retryState, CommandOperationHelper::chooseRetryableReadException, + return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableReadAttemptFailure(operationContext), CommandOperationHelper::shouldAttemptToRetryRead, callback -> { logRetryExecute(retryState, operationContext); asyncReadFunction.get(callback); @@ -294,7 +296,7 @@ static AsyncCallbackSupplier decorateReadWithRetriesAsync(final RetryStat static AsyncCallbackSupplier decorateWriteWithRetriesAsync(final RetryState retryState, final OperationContext operationContext, final AsyncCallbackSupplier asyncWriteFunction) { - return new RetryingAsyncCallbackSupplier<>(retryState, CommandOperationHelper::chooseRetryableWriteException, + return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), CommandOperationHelper::shouldAttemptToRetryWrite, callback -> { logRetryExecute(retryState, operationContext); asyncWriteFunction.get(callback); diff --git 
a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java index fb1cc3c2da2..3f47ba06f89 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java @@ -36,6 +36,7 @@ import org.bson.BsonDocument; import java.util.List; +import java.util.function.BinaryOperator; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.assertFalse; @@ -51,8 +52,14 @@ interface CommandCreator { BsonDocument create(ServerDescription serverDescription, ConnectionDescription connectionDescription); } + static BinaryOperator onRetryableReadAttemptFailure(final OperationContext operationContext) { + return (@Nullable Throwable previouslyChosenException, Throwable mostRecentAttemptException) -> { + operationContext.getServerDeprioritization().onAttemptFailure(mostRecentAttemptException); + return chooseRetryableReadException(previouslyChosenException, mostRecentAttemptException); + }; + } - static Throwable chooseRetryableReadException( + private static Throwable chooseRetryableReadException( @Nullable final Throwable previouslyChosenException, final Throwable mostRecentAttemptException) { assertFalse(mostRecentAttemptException instanceof ResourceSupplierInternalException); if (previouslyChosenException == null @@ -64,7 +71,14 @@ static Throwable chooseRetryableReadException( } } - static Throwable chooseRetryableWriteException( + static BinaryOperator onRetryableWriteAttemptFailure(final OperationContext operationContext) { + return (@Nullable Throwable previouslyChosenException, Throwable mostRecentAttemptException) -> { + operationContext.getServerDeprioritization().onAttemptFailure(mostRecentAttemptException); + return chooseRetryableWriteException(previouslyChosenException, mostRecentAttemptException); + }; + } + + private static Throwable chooseRetryableWriteException( @Nullable final Throwable previouslyChosenException, final Throwable mostRecentAttemptException) { if (previouslyChosenException == null) { if (mostRecentAttemptException instanceof ResourceSupplierInternalException) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index fb54fb33994..fe58fb0bd75 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -62,6 +62,7 @@ import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel; import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; @@ -140,7 +141,7 @@ public Boolean getRetryWrites() { private Supplier decorateWriteWithRetries(final RetryState retryState, final OperationContext operationContext, final Supplier writeFunction) { - return new RetryingSyncSupplier<>(retryState, 
CommandOperationHelper::chooseRetryableWriteException, + return new RetryingSyncSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), this::shouldAttemptToRetryWrite, () -> { logRetryExecute(retryState, operationContext); return writeFunction.get(); @@ -149,7 +150,7 @@ private Supplier decorateWriteWithRetries(final RetryState retryState, fi private AsyncCallbackSupplier decorateWriteWithRetries(final RetryState retryState, final OperationContext operationContext, final AsyncCallbackSupplier writeFunction) { - return new RetryingAsyncCallbackSupplier<>(retryState, CommandOperationHelper::chooseRetryableWriteException, + return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), this::shouldAttemptToRetryWrite, callback -> { logRetryExecute(retryState, operationContext); writeFunction.get(callback); diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java index a10604bb717..5610f84dd36 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -51,6 +51,8 @@ import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableReadAttemptFailure; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; import static com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; import static com.mongodb.internal.operation.OperationHelper.canRetryWrite; @@ -274,7 +276,7 @@ static T createReadCommandAndExecute( static Supplier decorateWriteWithRetries(final RetryState retryState, final OperationContext operationContext, final Supplier writeFunction) { - return new RetryingSyncSupplier<>(retryState, CommandOperationHelper::chooseRetryableWriteException, + return new RetryingSyncSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), CommandOperationHelper::shouldAttemptToRetryWrite, () -> { logRetryExecute(retryState, operationContext); return writeFunction.get(); @@ -283,7 +285,7 @@ static Supplier decorateWriteWithRetries(final RetryState retryState, static Supplier decorateReadWithRetries(final RetryState retryState, final OperationContext operationContext, final Supplier readFunction) { - return new RetryingSyncSupplier<>(retryState, CommandOperationHelper::chooseRetryableReadException, + return new RetryingSyncSupplier<>(retryState, onRetryableReadAttemptFailure(operationContext), CommandOperationHelper::shouldAttemptToRetryRead, () -> { logRetryExecute(retryState, operationContext); return readFunction.get(); diff --git a/driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java new file mode 100644 index 00000000000..22f55ac0245 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.selector; + +import com.mongodb.annotations.Immutable; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; + +/** + * {@linkplain #select(ClusterDescription) Selects} at most two {@link ServerDescription}s at random. This selector uses the + * Fisher–Yates, a.k.a. Durstenfeld, shuffle algorithm. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

    + */ +@Immutable +public final class AtMostTwoRandomServerSelector implements ServerSelector { + private static final int TWO = 2; + private static final AtMostTwoRandomServerSelector INSTANCE = new AtMostTwoRandomServerSelector(); + + private AtMostTwoRandomServerSelector() { + } + + public static AtMostTwoRandomServerSelector instance() { + return INSTANCE; + } + + @Override + public List select(final ClusterDescription clusterDescription) { + List serverDescriptions = new ArrayList<>(clusterDescription.getServerDescriptions()); + List result = new ArrayList<>(); + ThreadLocalRandom random = ThreadLocalRandom.current(); + for (int i = serverDescriptions.size() - 1; i >= 0; i--) { + Collections.swap(serverDescriptions, i, random.nextInt(i + 1)); + result.add(serverDescriptions.get(i)); + if (result.size() == TWO) { + break; + } + } + return result; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java new file mode 100644 index 00000000000..8acc5978c1f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.selector; + +import com.mongodb.ServerAddress; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Cluster.ServersSnapshot; +import com.mongodb.internal.connection.Server; +import com.mongodb.selector.ServerSelector; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.util.Collections.emptyList; +import static java.util.Comparator.comparingInt; + +/** + * {@linkplain #select(ClusterDescription) Selects} at most one {@link ServerDescription} + * corresponding to a {@link ServersSnapshot#getServer(ServerAddress) server} with the smallest {@link Server#operationCount()}. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
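A small illustration of the selection rule described above; it is not taken from this patch, and the cluster description and snapshot are assumed inputs whose servers report the operation counts mentioned in the comment.

import com.mongodb.connection.ClusterDescription;
import com.mongodb.connection.ServerDescription;
import com.mongodb.internal.connection.Cluster.ServersSnapshot;
import com.mongodb.internal.selector.MinimumOperationCountServerSelector;

import java.util.List;

final class MinimumOperationCountSelectionSketch {
    // Given a snapshot whose servers report, for example, operation counts {a=5, b=2, c=7} for the
    // descriptions in clusterDescription, the selector returns only b's description.
    static List<ServerDescription> leastBusy(final ClusterDescription clusterDescription,
                                             final ServersSnapshot serversSnapshot) {
        return new MinimumOperationCountServerSelector(serversSnapshot).select(clusterDescription);
    }
}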

    + */ +@ThreadSafe +public final class MinimumOperationCountServerSelector implements ServerSelector { + private final ServersSnapshot serversSnapshot; + + /** + * @param serversSnapshot Must {@linkplain ServersSnapshot#containsServer(ServerAddress) contain} {@link Server}s corresponding to + * {@linkplain ClusterDescription#getServerDescriptions() all} {@link ServerDescription}s + * in the {@link ClusterDescription} passed to {@link #select(ClusterDescription)}. + */ + public MinimumOperationCountServerSelector(final ServersSnapshot serversSnapshot) { + this.serversSnapshot = serversSnapshot; + } + + @Override + public List select(final ClusterDescription clusterDescription) { + return clusterDescription.getServerDescriptions() + .stream() + .min(comparingInt(serverDescription -> + assertNotNull(serversSnapshot.getServer(serverDescription.getAddress())) + .operationCount())) + .map(Collections::singletonList) + .orElse(emptyList()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java index 5ac3c35d4ae..c0924c3d74d 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java @@ -81,12 +81,13 @@ protected void applyResponse(final BsonArray response) { protected void applyApplicationError(final BsonDocument applicationError) { ServerAddress serverAddress = new ServerAddress(applicationError.getString("address").getValue()); int errorGeneration = applicationError.getNumber("generation", - new BsonInt32(((DefaultServer) getCluster().getServer(serverAddress)).getConnectionPool().getGeneration())).intValue(); + new BsonInt32(((DefaultServer) getCluster().getServersSnapshot().getServer(serverAddress)) + .getConnectionPool().getGeneration())).intValue(); int maxWireVersion = applicationError.getNumber("maxWireVersion").intValue(); String when = applicationError.getString("when").getValue(); String type = applicationError.getString("type").getValue(); - DefaultServer server = (DefaultServer) cluster.getServer(serverAddress); + DefaultServer server = (DefaultServer) cluster.getServersSnapshot().getServer(serverAddress); RuntimeException exception; switch (type) { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy index c7428d2f4e7..0f51bab44a8 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy @@ -46,6 +46,9 @@ import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY import static java.util.concurrent.TimeUnit.MILLISECONDS import static java.util.concurrent.TimeUnit.SECONDS +/** + * Add new tests to {@link BaseClusterTest}. 
+ */ class BaseClusterSpecification extends Specification { private final ServerAddress firstServer = new ServerAddress('localhost:27017') @@ -67,8 +70,11 @@ class BaseClusterSpecification extends Specification { } @Override - ClusterableServer getServer(final ServerAddress serverAddress) { - throw new UnsupportedOperationException() + Cluster.ServersSnapshot getServersSnapshot() { + Cluster.ServersSnapshot result = { + serverAddress -> throw new UnsupportedOperationException() + } + result } @Override diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java new file mode 100644 index 00000000000..641f814a6dd --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.mockito.MongoMockito; +import com.mongodb.internal.selector.ServerAddressSelector; +import org.junit.jupiter.api.Test; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.mockito.Mockito.when; + +/** + * @see BaseClusterSpecification + */ +final class BaseClusterTest { + @Test + void selectServerToleratesWhenThereIsNoServerForTheSelectedAddress() { + ServerAddress serverAddressA = new ServerAddress("a"); + ServerAddress serverAddressB = new ServerAddress("b"); + Server serverB = MongoMockito.mock(Server.class, server -> + when(server.operationCount()).thenReturn(0)); + ClusterDescription clusterDescriptionAB = new ClusterDescription(ClusterConnectionMode.MULTIPLE, ClusterType.SHARDED, + asList(serverDescription(serverAddressA), serverDescription(serverAddressB))); + Cluster.ServersSnapshot serversSnapshotB = serverAddress -> serverAddress.equals(serverAddressB) ? 
serverB : null; + assertDoesNotThrow(() -> BaseCluster.createCompleteSelectorAndSelectServer( + new ServerAddressSelector(serverAddressA), + clusterDescriptionAB, + serversSnapshotB, + new OperationContext().getServerDeprioritization(), + ClusterSettings.builder().build())); + } + + private static ServerDescription serverDescription(final ServerAddress serverAddress) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(serverAddress) + .build(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy index e849f8fa203..a0b96706f0e 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy @@ -394,8 +394,11 @@ class DefaultServerSpecification extends Specification { } @Override - ClusterableServer getServer(final ServerAddress serverAddress) { - throw new UnsupportedOperationException() + Cluster.ServersSnapshot getServersSnapshot() { + Cluster.ServersSnapshot result = { + serverAddress -> throw new UnsupportedOperationException() + } + result } @Override diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy index 096053a0b11..f14305bb6b8 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy @@ -87,14 +87,14 @@ class MultiServerClusterSpecification extends Specification { cluster.getCurrentDescription().connectionMode == MULTIPLE } - def 'should not get server when closed'() { + def 'should not get servers snapshot when closed'() { given: def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts(Arrays.asList(firstServer)).mode(MULTIPLE).build(), factory) cluster.close() when: - cluster.getServer(firstServer) + cluster.getServersSnapshot() then: thrown(IllegalStateException) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java new file mode 100644 index 00000000000..816bca3f3f9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java @@ -0,0 +1,124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.internal.connection.OperationContext.ServerDeprioritization; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.EnumSource.Mode.EXCLUDE; + +final class ServerDeprioritizationTest { + private static final ServerDescription SERVER_A = serverDescription("a"); + private static final ServerDescription SERVER_B = serverDescription("b"); + private static final ServerDescription SERVER_C = serverDescription("c"); + private static final List ALL_SERVERS = unmodifiableList(asList(SERVER_A, SERVER_B, SERVER_C)); + private static final ClusterDescription REPLICA_SET = clusterDescription(ClusterType.REPLICA_SET); + private static final ClusterDescription SHARDED_CLUSTER = clusterDescription(ClusterType.SHARDED); + + private ServerDeprioritization serverDeprioritization; + + @BeforeEach + void beforeEach() { + serverDeprioritization = new OperationContext().getServerDeprioritization(); + } + + @Test + void selectNoneDeprioritized() { + assertAll( + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)), + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(REPLICA_SET)) + ); + } + + @Test + void selectSomeDeprioritized() { + deprioritize(SERVER_B); + assertAll( + () -> assertEquals(asList(SERVER_A, SERVER_C), serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)), + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(REPLICA_SET)) + ); + } + + @Test + void selectAllDeprioritized() { + deprioritize(SERVER_A); + deprioritize(SERVER_B); + deprioritize(SERVER_C); + assertAll( + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)), + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(REPLICA_SET)) + ); + } + + @ParameterizedTest + @EnumSource(value = ClusterType.class, mode = EXCLUDE, names = {"SHARDED"}) + void serverSelectorSelectsAllIfNotShardedCluster(final ClusterType clusterType) { + serverDeprioritization.updateCandidate(SERVER_A.getAddress()); + serverDeprioritization.onAttemptFailure(new RuntimeException()); + assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(clusterDescription(clusterType))); + } + + @Test + void onAttemptFailureIgnoresIfPoolClearedException() { + serverDeprioritization.updateCandidate(SERVER_A.getAddress()); + serverDeprioritization.onAttemptFailure( + new MongoConnectionPoolClearedException(new ServerId(new ClusterId(), new ServerAddress()), null)); + assertEquals(ALL_SERVERS, 
serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)); + } + + @Test + void onAttemptFailureDoesNotThrowIfNoCandidate() { + assertDoesNotThrow(() -> serverDeprioritization.onAttemptFailure(new RuntimeException())); + } + + private void deprioritize(final ServerDescription... serverDescriptions) { + for (ServerDescription serverDescription : serverDescriptions) { + serverDeprioritization.updateCandidate(serverDescription.getAddress()); + serverDeprioritization.onAttemptFailure(new RuntimeException()); + } + } + + private static ServerDescription serverDescription(final String host) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(new ServerAddress(host)) + .build(); + } + + private static ClusterDescription clusterDescription(final ClusterType clusterType) { + return new ClusterDescription(ClusterConnectionMode.MULTIPLE, clusterType, ALL_SERVERS); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java index 4af47cb9557..2bc41fee1be 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java @@ -120,7 +120,7 @@ private void assertServer(final String serverName, final BsonDocument expectedSe if (expectedServerDescriptionDocument.isDocument("pool")) { int expectedGeneration = expectedServerDescriptionDocument.getDocument("pool").getNumber("generation").intValue(); - DefaultServer server = (DefaultServer) getCluster().getServer(new ServerAddress(serverName)); + DefaultServer server = (DefaultServer) getCluster().getServersSnapshot().getServer(new ServerAddress(serverName)); assertEquals(expectedGeneration, server.getConnectionPool().getGeneration()); } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java index dd1c95c5e59..6f1a9d25bb1 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java @@ -20,6 +20,7 @@ import com.mongodb.ServerAddress; import com.mongodb.assertions.Assertions; import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; import com.mongodb.internal.selector.ReadPreferenceServerSelector; import com.mongodb.selector.ServerSelector; import org.bson.BsonArray; @@ -56,7 +57,7 @@ @RunWith(Parameterized.class) public class ServerSelectionWithinLatencyWindowTest { private final ClusterDescription clusterDescription; - private final Map serverCatalog; + private final Cluster.ServersSnapshot serversSnapshot; private final int iterations; private final Outcome outcome; @@ -65,7 +66,7 @@ public ServerSelectionWithinLatencyWindowTest( @SuppressWarnings("unused") final String description, final BsonDocument definition) { clusterDescription = buildClusterDescription(definition.getDocument("topology_description"), null); - serverCatalog = serverCatalog(definition.getArray("mocked_topology_state")); + serversSnapshot = serverCatalog(definition.getArray("mocked_topology_state")); iterations = definition.getInt32("iterations").getValue(); outcome = 
Outcome.parse(definition.getDocument("outcome")); } @@ -73,9 +74,11 @@ public ServerSelectionWithinLatencyWindowTest( @Test public void shouldPassAllOutcomes() { ServerSelector selector = new ReadPreferenceServerSelector(ReadPreference.nearest()); + OperationContext.ServerDeprioritization emptyServerDeprioritization = new OperationContext().getServerDeprioritization(); + ClusterSettings defaultClusterSettings = ClusterSettings.builder().build(); Map> selectionResultsGroupedByServerAddress = IntStream.range(0, iterations) - .mapToObj(i -> BaseCluster.selectServer(selector, clusterDescription, - address -> Assertions.assertNotNull(serverCatalog.get(address)))) + .mapToObj(i -> BaseCluster.createCompleteSelectorAndSelectServer(selector, clusterDescription, serversSnapshot, + emptyServerDeprioritization, defaultClusterSettings)) .collect(groupingBy(serverTuple -> serverTuple.getServerDescription().getAddress())); Map selectionFrequencies = selectionResultsGroupedByServerAddress.entrySet() .stream() @@ -97,8 +100,8 @@ public static Collection data() { .collect(toList()); } - private static Map serverCatalog(final BsonArray mockedTopologyState) { - return mockedTopologyState.stream() + private static Cluster.ServersSnapshot serverCatalog(final BsonArray mockedTopologyState) { + Map serverMap = mockedTopologyState.stream() .map(BsonValue::asDocument) .collect(toMap( el -> new ServerAddress(el.getString("address").getValue()), @@ -108,6 +111,7 @@ private static Map serverCatalog(final BsonArray mockedTo when(server.operationCount()).thenReturn(operationCount); return server; })); + return serverAddress -> Assertions.assertNotNull(serverMap.get(serverAddress)); } private static final class Outcome { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy index f47ab6644d8..a3a0f6a2d6f 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy @@ -76,21 +76,21 @@ class SingleServerClusterSpecification extends Specification { sendNotification(firstServer, STANDALONE) then: - cluster.getServer(firstServer) == factory.getServer(firstServer) + cluster.getServersSnapshot().getServer(firstServer) == factory.getServer(firstServer) cleanup: cluster?.close() } - def 'should not get server when closed'() { + def 'should not get servers snapshot when closed'() { given: def cluster = new SingleServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(SINGLE).hosts(Arrays.asList(firstServer)).build(), factory) cluster.close() when: - cluster.getServer(firstServer) + cluster.getServersSnapshot() then: thrown(IllegalStateException) diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java new file mode 100644 index 00000000000..1d174ccabe7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.selector; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +final class AtMostTwoRandomServerSelectorTest { + @ParameterizedTest + @MethodSource("args") + void select( + final List hosts, + final int numberOfSelectIterations, + final double expectedCount, + final double frequencyTolerance, + final int expectedSelectedSize) { + ClusterDescription clusterDescription = clusterDescription(hosts); + HashMap actualCounters = new HashMap<>(); + for (int i = 0; i < numberOfSelectIterations; i++) { + List selected = AtMostTwoRandomServerSelector.instance().select(clusterDescription); + assertEquals(expectedSelectedSize, selected.size(), selected::toString); + selected.forEach(serverDescription -> actualCounters.merge(serverDescription.getAddress(), 1, Integer::sum)); + } + actualCounters.forEach((serverAddress, counter) -> + assertEquals( + expectedCount / numberOfSelectIterations, + (double) counter / numberOfSelectIterations, + frequencyTolerance, + () -> String.format("serverAddress=%s, counter=%d, actualCounters=%s", serverAddress, counter, actualCounters))); + } + + private static Stream args() { + int smallNumberOfSelectIterations = 10; + int largeNumberOfSelectIterations = 2_000; + int maxSelectedSize = 2; + return Stream.of( + arguments(emptyList(), + smallNumberOfSelectIterations, 0, 0, 0), + arguments(singletonList("1"), + smallNumberOfSelectIterations, smallNumberOfSelectIterations, 0, 1), + arguments(asList("1", "2"), + smallNumberOfSelectIterations, smallNumberOfSelectIterations, 0, maxSelectedSize), + arguments(asList("1", "2", "3"), + largeNumberOfSelectIterations, (double) maxSelectedSize * largeNumberOfSelectIterations / 3, 0.05, maxSelectedSize), + arguments(asList("1", "2", "3", "4", "5", "6", "7"), + largeNumberOfSelectIterations, (double) maxSelectedSize * largeNumberOfSelectIterations / 7, 0.05, maxSelectedSize) + ); + } + + private static ClusterDescription clusterDescription(final List hosts) { + return new ClusterDescription(ClusterConnectionMode.MULTIPLE, ClusterType.REPLICA_SET, serverDescriptions(hosts)); + } + + private static List serverDescriptions(final Collection hosts) { + return hosts.stream() + .map(AtMostTwoRandomServerSelectorTest::serverDescription) + .collect(toList()); + } + + 
private static ServerDescription serverDescription(final String host) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(new ServerAddress(host)) + .build(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java new file mode 100644 index 00000000000..3a0d754cb97 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.selector; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.Server; +import com.mongodb.internal.mockito.MongoMockito; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.Mockito.when; + +final class MinimumOperationCountServerSelectorTest { + @ParameterizedTest + @MethodSource("args") + void select(final Map hostToOperationCount, final List expectedHosts) { + ClusterDescriptionAndServersSnapshot pair = clusterDescriptionAndServersSnapshot(hostToOperationCount); + List actualHosts = new MinimumOperationCountServerSelector(pair.getServersSnapshot()) + .select(pair.getClusterDescription()) + .stream() + .map(serverDescription -> serverDescription.getAddress().getHost()) + .collect(toList()); + assertEquals(expectedHosts, actualHosts, hostToOperationCount::toString); + } + + private static Stream args() { + return Stream.of( + arguments(emptyMap(), emptyList()), + arguments(singletonMap("a", 0), singletonList("a")), + arguments(linkedMap(m -> { + m.put("b", 0); + m.put("a", 5); + }), singletonList("b")), + arguments(linkedMap(m -> { + m.put("b", 2); + m.put("a", 3); + m.put("c", 2); + }), singletonList("b")), + arguments(linkedMap(m -> { + m.put("b", 5); + m.put("a", 5); + m.put("e", 
0); + m.put("c", 5); + m.put("d", 8); + }), singletonList("e")) + ); + } + + private static ClusterDescriptionAndServersSnapshot clusterDescriptionAndServersSnapshot(final Map hostToOperationCount) { + ClusterDescription clusterDescription = new ClusterDescription( + ClusterConnectionMode.MULTIPLE, ClusterType.REPLICA_SET, serverDescriptions(hostToOperationCount.keySet())); + Map serverAddressToOperationCount = hostToOperationCount.entrySet() + .stream().collect(toMap(entry -> new ServerAddress(entry.getKey()), Map.Entry::getValue)); + Cluster.ServersSnapshot serversSnapshot = serverAddress -> { + int operationCount = serverAddressToOperationCount.get(serverAddress); + return MongoMockito.mock(Server.class, server -> + when(server.operationCount()).thenReturn(operationCount)); + }; + return new ClusterDescriptionAndServersSnapshot(clusterDescription, serversSnapshot); + } + + private static List serverDescriptions(final Collection hosts) { + return hosts.stream() + .map(MinimumOperationCountServerSelectorTest::serverDescription) + .collect(toList()); + } + + private static ServerDescription serverDescription(final String host) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(new ServerAddress(host)) + .build(); + } + + private static LinkedHashMap linkedMap(final Consumer> filler) { + LinkedHashMap result = new LinkedHashMap<>(); + filler.accept(result); + return result; + } + + private static final class ClusterDescriptionAndServersSnapshot { + private final ClusterDescription clusterDescription; + private final Cluster.ServersSnapshot serversSnapshot; + + private ClusterDescriptionAndServersSnapshot( + final ClusterDescription clusterDescription, + final Cluster.ServersSnapshot serversSnapshot) { + this.clusterDescription = clusterDescription; + this.serversSnapshot = serversSnapshot; + } + + ClusterDescription getClusterDescription() { + return clusterDescription; + } + + Cluster.ServersSnapshot getServersSnapshot() { + return serversSnapshot; + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java index b29d2df8241..21ef7698248 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java @@ -16,8 +16,10 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.client.MongoCursor; import com.mongodb.client.RetryableWritesProseTest; import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.Document; import org.junit.jupiter.api.Test; import java.util.concurrent.ExecutionException; @@ -29,16 +31,48 @@ * See * Retryable Reads Tests. */ -public class RetryableReadsProseTest { +final class RetryableReadsProseTest { /** * See * * PoolClearedError Retryability Test. 
*/ @Test - public void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { + void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { RetryableWritesProseTest.poolClearedExceptionMustBeRetryable( mongoClientSettings -> new SyncMongoClient(MongoClients.create(mongoClientSettings)), mongoCollection -> mongoCollection.find(eq(0)).iterator().hasNext(), "find", false); } + + /** + * See + * + * Retryable Reads Are Retried on a Different mongos When One is Available. + */ + @Test + void retriesOnDifferentMongosWhenAvailable() { + RetryableWritesProseTest.retriesOnDifferentMongosWhenAvailable( + mongoClientSettings -> new SyncMongoClient(MongoClients.create(mongoClientSettings)), + mongoCollection -> { + try (MongoCursor cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } + + /** + * See + * + * Retryable Reads Are Retried on the Same mongos When No Others are Available. + */ + @Test + void retriesOnSameMongosWhenAnotherNotAvailable() { + RetryableWritesProseTest.retriesOnSameMongosWhenAnotherNotAvailable( + mongoClientSettings -> new SyncMongoClient(MongoClients.create(mongoClientSettings)), + mongoCollection -> { + try (MongoCursor cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java index fcfd3160515..11b26d9d28a 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java @@ -56,7 +56,7 @@ public void setUp() { @Test public void testRetryWritesWithInsertOneAgainstMMAPv1RaisesError() { - assumeTrue(canRunTests()); + assumeTrue(canRunMmapv1Tests()); boolean exceptionFound = false; try { @@ -73,7 +73,7 @@ public void testRetryWritesWithInsertOneAgainstMMAPv1RaisesError() { @Test public void testRetryWritesWithFindOneAndDeleteAgainstMMAPv1RaisesError() { - assumeTrue(canRunTests()); + assumeTrue(canRunMmapv1Tests()); boolean exceptionFound = false; try { @@ -107,7 +107,27 @@ public void originalErrorMustBePropagatedIfNoWritesPerformed() throws Interrupte mongoClientSettings -> new SyncMongoClient(MongoClients.create(mongoClientSettings))); } - private boolean canRunTests() { + /** + * Prose test #4. + */ + @Test + public void retriesOnDifferentMongosWhenAvailable() { + com.mongodb.client.RetryableWritesProseTest.retriesOnDifferentMongosWhenAvailable( + mongoClientSettings -> new SyncMongoClient(MongoClients.create(mongoClientSettings)), + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + /** + * Prose test #5. 
+ */ + @Test + public void retriesOnSameMongosWhenAnotherNotAvailable() { + com.mongodb.client.RetryableWritesProseTest.retriesOnSameMongosWhenAnotherNotAvailable( + mongoClientSettings -> new SyncMongoClient(MongoClients.create(mongoClientSettings)), + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + private boolean canRunMmapv1Tests() { Document storageEngine = (Document) getServerStatus().get("storageEngine"); return ((isSharded() || isDiscoverableReplicaSet()) diff --git a/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java index cb1f40bfe81..ccf18aad5b9 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java @@ -16,6 +16,7 @@ package com.mongodb.client; +import org.bson.Document; import org.junit.jupiter.api.Test; import java.util.concurrent.ExecutionException; @@ -27,15 +28,45 @@ * See * Retryable Reads Tests. */ -public class RetryableReadsProseTest { +final class RetryableReadsProseTest { /** * See * * PoolClearedError Retryability Test. */ @Test - public void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { + void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { RetryableWritesProseTest.poolClearedExceptionMustBeRetryable(MongoClients::create, mongoCollection -> mongoCollection.find(eq(0)).iterator().hasNext(), "find", false); } + + /** + * See + * + * Retryable Reads Are Retried on a Different mongos When One is Available. + */ + @Test + void retriesOnDifferentMongosWhenAvailable() { + RetryableWritesProseTest.retriesOnDifferentMongosWhenAvailable(MongoClients::create, + mongoCollection -> { + try (MongoCursor cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } + + /** + * See + * + * Retryable Reads Are Retried on the Same mongos When No Others are Available. 
+ */ + @Test + void retriesOnSameMongosWhenAnotherNotAvailable() { + RetryableWritesProseTest.retriesOnSameMongosWhenAnotherNotAvailable(MongoClients::create, + mongoCollection -> { + try (MongoCursor cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java index c4da13c1e81..289efb7287b 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java @@ -16,18 +16,25 @@ package com.mongodb.client; +import com.mongodb.ConnectionString; import com.mongodb.Function; import com.mongodb.MongoClientException; import com.mongodb.MongoClientSettings; import com.mongodb.MongoException; +import com.mongodb.MongoServerException; import com.mongodb.MongoWriteConcernException; import com.mongodb.ServerAddress; import com.mongodb.assertions.Assertions; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandFailedEvent; import com.mongodb.event.CommandListener; import com.mongodb.event.CommandSucceededEvent; import com.mongodb.event.ConnectionCheckOutFailedEvent; import com.mongodb.event.ConnectionCheckedOutEvent; import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.internal.connection.ServerAddressHelper; import com.mongodb.internal.connection.TestCommandListener; import com.mongodb.internal.connection.TestConnectionPoolListener; import org.bson.BsonArray; @@ -39,6 +46,9 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.HashSet; +import java.util.List; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -50,6 +60,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static com.mongodb.ClusterFixture.getConnectionString; +import static com.mongodb.ClusterFixture.getMultiMongosConnectionString; import static com.mongodb.ClusterFixture.getServerStatus; import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isServerlessTest; @@ -59,10 +71,13 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan; import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.client.Fixture.getMultiMongosMongoClientSettingsBuilder; import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeFalse; @@ -82,7 +97,7 @@ public void setUp() { @Test public void testRetryWritesWithInsertOneAgainstMMAPv1RaisesError() { - assumeTrue(canRunTests()); + assumeTrue(canRunMmapv1Tests()); boolean exceptionFound = false; try { @@ -99,7 +114,7 @@ public void 
testRetryWritesWithInsertOneAgainstMMAPv1RaisesError() { @Test public void testRetryWritesWithFindOneAndDeleteAgainstMMAPv1RaisesError() { - assumeTrue(canRunTests()); + assumeTrue(canRunMmapv1Tests()); boolean exceptionFound = false; try { @@ -256,7 +271,134 @@ public void commandSucceeded(final CommandSucceededEvent event) { } } - private boolean canRunTests() { + /** + * Prose test #4. + */ + @Test + public void retriesOnDifferentMongosWhenAvailable() { + retriesOnDifferentMongosWhenAvailable(MongoClients::create, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + @SuppressWarnings("try") + public static void retriesOnDifferentMongosWhenAvailable( + final Function clientCreator, + final Function, R> operation, final String operationName, final boolean write) { + if (write) { + assumeTrue(serverVersionAtLeast(4, 4)); + } else { + assumeTrue(serverVersionAtLeast(4, 2)); + } + assumeTrue(isSharded()); + ConnectionString connectionString = getMultiMongosConnectionString(); + assumeTrue(connectionString != null); + ServerAddress s0Address = ServerAddressHelper.createServerAddress(connectionString.getHosts().get(0)); + ServerAddress s1Address = ServerAddressHelper.createServerAddress(connectionString.getHosts().get(1)); + BsonDocument failPointDocument = BsonDocument.parse( + "{\n" + + " configureFailPoint: \"failCommand\",\n" + + " mode: { times: 1 },\n" + + " data: {\n" + + " failCommands: [\"" + operationName + "\"],\n" + + (write + ? " errorLabels: [\"RetryableWriteError\"]," : "") + + " errorCode: 6\n" + + " }\n" + + "}\n"); + TestCommandListener commandListener = new TestCommandListener(singletonList("commandFailedEvent"), emptyList()); + try (FailPoint s0FailPoint = FailPoint.enable(failPointDocument, s0Address); + FailPoint s1FailPoint = FailPoint.enable(failPointDocument, s1Address); + MongoClient client = clientCreator.apply(getMultiMongosMongoClientSettingsBuilder() + .retryReads(true) + .retryWrites(true) + .addCommandListener(commandListener) + // explicitly specify only s0 and s1, in case `getMultiMongosMongoClientSettingsBuilder` has more + .applyToClusterSettings(builder -> builder.hosts(asList(s0Address, s1Address))) + .build())) { + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("retriesOnDifferentMongosWhenAvailable"); + collection.drop(); + commandListener.reset(); + assertThrows(MongoServerException.class, () -> operation.apply(collection)); + List failedCommandEvents = commandListener.getEvents(); + assertEquals(2, failedCommandEvents.size(), failedCommandEvents::toString); + List unexpectedCommandNames = failedCommandEvents.stream() + .map(CommandEvent::getCommandName) + .filter(commandName -> !commandName.equals(operationName)) + .collect(Collectors.toList()); + assertTrue(unexpectedCommandNames.isEmpty(), unexpectedCommandNames::toString); + Set failedServerAddresses = failedCommandEvents.stream() + .map(CommandEvent::getConnectionDescription) + .map(ConnectionDescription::getServerAddress) + .collect(Collectors.toSet()); + assertEquals(new HashSet<>(asList(s0Address, s1Address)), failedServerAddresses); + } + } + + /** + * Prose test #5. 
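FailPoint.enable in the helper above is the test fixture's way of turning the parsed failCommand document on against one specific mongos and switching it off again when the try-with-resources block closes. A hedged sketch of that idea using only public driver API (the class name, method name, and single-host settings here are assumptions, not the fixture's actual code):

    import com.mongodb.MongoClientSettings;
    import com.mongodb.ServerAddress;
    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoClients;
    import org.bson.BsonDocument;

    import static java.util.Collections.singletonList;

    // Sketch only: run configureFailPoint against a single mongos, then disable it.
    final class FailPointSketch {
        static void withFailPoint(final ServerAddress mongosAddress, final BsonDocument failPointDocument,
                                  final Runnable operationUnderTest) {
            MongoClientSettings settings = MongoClientSettings.builder()
                    .applyToClusterSettings(builder -> builder.hosts(singletonList(mongosAddress)))
                    .build();
            try (MongoClient client = MongoClients.create(settings)) {
                client.getDatabase("admin").runCommand(failPointDocument);
                try {
                    operationUnderTest.run();
                } finally {
                    client.getDatabase("admin").runCommand(
                            BsonDocument.parse("{\"configureFailPoint\": \"failCommand\", \"mode\": \"off\"}"));
                }
            }
        }
    }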
+ */ + @Test + public void retriesOnSameMongosWhenAnotherNotAvailable() { + retriesOnSameMongosWhenAnotherNotAvailable(MongoClients::create, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + @SuppressWarnings("try") + public static void retriesOnSameMongosWhenAnotherNotAvailable( + final Function clientCreator, + final Function, R> operation, final String operationName, final boolean write) { + if (write) { + assumeTrue(serverVersionAtLeast(4, 4)); + } else { + assumeTrue(serverVersionAtLeast(4, 2)); + } + assumeTrue(isSharded()); + ConnectionString connectionString = getConnectionString(); + ServerAddress s0Address = ServerAddressHelper.createServerAddress(connectionString.getHosts().get(0)); + BsonDocument failPointDocument = BsonDocument.parse( + "{\n" + + " configureFailPoint: \"failCommand\",\n" + + " mode: { times: 1 },\n" + + " data: {\n" + + " failCommands: [\"" + operationName + "\"],\n" + + (write + ? " errorLabels: [\"RetryableWriteError\"]," : "") + + " errorCode: 6\n" + + " }\n" + + "}\n"); + TestCommandListener commandListener = new TestCommandListener( + asList("commandFailedEvent", "commandSucceededEvent"), emptyList()); + try (FailPoint s0FailPoint = FailPoint.enable(failPointDocument, s0Address); + MongoClient client = clientCreator.apply(getMongoClientSettingsBuilder() + .retryReads(true) + .retryWrites(true) + .addCommandListener(commandListener) + // explicitly specify only s0, in case `getMongoClientSettingsBuilder` has more + .applyToClusterSettings(builder -> builder + .hosts(singletonList(s0Address)) + .mode(ClusterConnectionMode.MULTIPLE)) + .build())) { + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("retriesOnSameMongosWhenAnotherNotAvailable"); + collection.drop(); + commandListener.reset(); + operation.apply(collection); + List commandEvents = commandListener.getEvents(); + assertEquals(2, commandEvents.size(), commandEvents::toString); + List unexpectedCommandNames = commandEvents.stream() + .map(CommandEvent::getCommandName) + .filter(commandName -> !commandName.equals(operationName)) + .collect(Collectors.toList()); + assertTrue(unexpectedCommandNames.isEmpty(), unexpectedCommandNames::toString); + assertInstanceOf(CommandFailedEvent.class, commandEvents.get(0), commandEvents::toString); + assertEquals(s0Address, commandEvents.get(0).getConnectionDescription().getServerAddress(), commandEvents::toString); + assertInstanceOf(CommandSucceededEvent.class, commandEvents.get(1), commandEvents::toString); + assertEquals(s0Address, commandEvents.get(1).getConnectionDescription().getServerAddress(), commandEvents::toString); + } + } + + private boolean canRunMmapv1Tests() { Document storageEngine = (Document) getServerStatus().get("storageEngine"); return ((isSharded() || isDiscoverableReplicaSet()) From d6e3799a00af45310e0250d2c1f5e7429f9ce544 Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Wed, 29 May 2024 11:38:23 -0600 Subject: [PATCH 19/90] Add throwTranslatedWriteException, refactoring, async helper (#1379) JAVA-5379 --- .../mongodb/internal/async/AsyncRunnable.java | 36 ++ .../internal/connection/CommandHelper.java | 3 +- .../connection/InternalStreamConnection.java | 58 ++- .../internal/async/AsyncFunctionsTest.java | 348 +++--------------- .../async/AsyncFunctionsTestAbstract.java | 324 ++++++++++++++++ 5 files changed, 441 insertions(+), 328 deletions(-) create mode 100644 driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java diff --git 
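The helper introduced in the AsyncRunnable diff below, thenRunTryCatchAsyncBlocks, is the async counterpart of a sync try-catch: its first argument plays the role of the try block and the error function plays the role of the catch block, scoped so that failures from earlier steps in the chain are not caught. A hedged sketch of that correspondence, where doWork, doWorkAsync, cleanup, and callback are placeholder names:

    // Sync shape being mirrored:
    //     try {
    //         doWork();
    //     } catch (MongoException e) {
    //         cleanup();
    //         throw e;
    //     }

    // Equivalent async shape using the new helper:
    beginAsync().thenRunTryCatchAsyncBlocks(c -> {
        doWorkAsync(c);                 // contents of the try block
    }, MongoException.class, (e, c) -> {
        cleanup();                      // contents of the catch block
        c.completeExceptionally(e);     // rethrow to the callback
    }).finish(callback);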
a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java index fcf8d61387d..33e1af001bb 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java @@ -178,6 +178,42 @@ default AsyncRunnable thenRun(final AsyncRunnable runnable) { }; } + /** + * The error check checks if the exception is an instance of the provided class. + * @see #thenRunTryCatchAsyncBlocks(AsyncRunnable, java.util.function.Predicate, AsyncFunction) + */ + default AsyncRunnable thenRunTryCatchAsyncBlocks( + final AsyncRunnable runnable, + final Class exceptionClass, + final AsyncFunction errorFunction) { + return thenRunTryCatchAsyncBlocks(runnable, e -> exceptionClass.isInstance(e), errorFunction); + } + + /** + * Convenience method corresponding to a try-catch block in sync code. + * This MUST be used to properly handle cases where there is code above + * the block, whose errors must not be caught by an ensuing + * {@link #onErrorIf(java.util.function.Predicate, AsyncFunction)}. + * + * @param runnable corresponds to the contents of the try block + * @param errorCheck for matching on an error (or, a more complex condition) + * @param errorFunction corresponds to the contents of the catch block + * @return the composition of this runnable, a runnable that runs the + * provided runnable, followed by (composed with) the error function, which + * is conditional on there being an exception meeting the error check. + */ + default AsyncRunnable thenRunTryCatchAsyncBlocks( + final AsyncRunnable runnable, + final Predicate errorCheck, + final AsyncFunction errorFunction) { + return this.thenRun(c -> { + beginAsync() + .thenRun(runnable) + .onErrorIf(errorCheck, errorFunction) + .finish(c); + }); + } + /** * @param condition the condition to check * @param runnable The async runnable to run after this runnable, diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java index ccf80716a23..dc0df6ac27e 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java @@ -61,7 +61,8 @@ static BsonDocument executeCommandWithoutCheckingForFailure(final String databas static void executeCommandAsync(final String database, final BsonDocument command, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, final InternalConnection internalConnection, final SingleResultCallback callback) { - internalConnection.sendAndReceiveAsync(getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi), + internalConnection.sendAndReceiveAsync( + getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi), new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(), (result, t) -> { if (t != null) { diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java index 218835f083e..fc90ce81bef 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java @@ -633,19 +633,34 @@ private T getCommandResult(final 
Decoder decoder, final ResponseBuffers r @Override public void sendMessage(final List byteBuffers, final int lastRequestId) { notNull("stream is open", stream); - if (isClosed()) { throw new MongoSocketClosedException("Cannot write to a closed stream", getServerAddress()); } - try { stream.write(byteBuffers); } catch (Exception e) { close(); - throw translateWriteException(e); + throwTranslatedWriteException(e); } } + @Override + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, + final SingleResultCallback callback) { + beginAsync().thenRun((c) -> { + notNull("stream is open", stream); + if (isClosed()) { + throw new MongoSocketClosedException("Cannot write to a closed stream", getServerAddress()); + } + c.complete(c); + }).thenRunTryCatchAsyncBlocks(c -> { + stream.writeAsync(byteBuffers, c.asHandler()); + }, Exception.class, (e, c) -> { + close(); + throwTranslatedWriteException(e); + }).finish(errorHandlingCallback(callback, LOGGER)); + } + @Override public ResponseBuffers receiveMessage(final int responseTo) { assertNotNull(stream); @@ -665,39 +680,6 @@ private ResponseBuffers receiveMessageWithAdditionalTimeout(final int additional } } - @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, - final SingleResultCallback callback) { - assertNotNull(stream); - - if (isClosed()) { - callback.onResult(null, new MongoSocketClosedException("Can not read from a closed socket", getServerAddress())); - return; - } - - writeAsync(byteBuffers, errorHandlingCallback(callback, LOGGER)); - } - - private void writeAsync(final List byteBuffers, final SingleResultCallback callback) { - try { - stream.writeAsync(byteBuffers, new AsyncCompletionHandler() { - @Override - public void completed(@Nullable final Void v) { - callback.onResult(null, null); - } - - @Override - public void failed(final Throwable t) { - close(); - callback.onResult(null, translateWriteException(t)); - } - }); - } catch (Throwable t) { - close(); - callback.onResult(null, t); - } - } - @Override public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { assertNotNull(stream); @@ -762,6 +744,10 @@ private void updateSessionContext(final SessionContext sessionContext, final Res } } + private void throwTranslatedWriteException(final Throwable e) { + throw translateWriteException(e); + } + private MongoException translateWriteException(final Throwable e) { if (e instanceof MongoException) { return (MongoException) e; diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java index b783b3de93b..deb8e4a2e4a 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java @@ -15,30 +15,17 @@ */ package com.mongodb.internal.async; -import com.mongodb.client.TestListener; import org.junit.jupiter.api.Test; -import org.opentest4j.AssertionFailedError; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.async.AsyncRunnable.beginAsync; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static 
org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -final class AsyncFunctionsTest { - private final TestListener listener = new TestListener(); - private final InvocationTracker invocationTracker = new InvocationTracker(); - private boolean isTestingAbruptCompletion = false; +final class AsyncFunctionsTest extends AsyncFunctionsTestAbstract { @Test void test1Method() { @@ -393,8 +380,8 @@ void testTryCatch() { // chain of 2 in try. // WARNING: "onErrorIf" will consider everything in // the preceding chain to be part of the try. - // Use nested async chains to define the beginning - // of the "try". + // Use nested async chains, or convenience methods, + // to define the beginning of the try. assertBehavesSameVariations(5, () -> { try { @@ -491,6 +478,56 @@ void testTryCatch() { }); } + @Test + void testTryCatchHelper() { + assertBehavesSameVariations(4, + () -> { + plain(0); + try { + sync(1); + } catch (Throwable t) { + plain(2); + throw t; + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(0); + c.complete(c); + }).thenRunTryCatchAsyncBlocks(c -> { + async(1, c); + }, Throwable.class, (t, c) -> { + plain(2); + c.completeExceptionally(t); + }).finish(callback); + }); + + assertBehavesSameVariations(5, + () -> { + plain(0); + try { + sync(1); + } catch (Throwable t) { + plain(2); + throw t; + } + sync(4); + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(0); + c.complete(c); + }).thenRunTryCatchAsyncBlocks(c -> { + async(1, c); + }, Throwable.class, (t, c) -> { + plain(2); + c.completeExceptionally(t); + }).thenRun(c -> { + async(4, c); + }).finish(callback); + }); + } + @Test void testTryCatchWithVariables() { // using supply etc. @@ -722,8 +759,8 @@ void testVariables() { @Test void testInvalid() { - isTestingAbruptCompletion = false; - invocationTracker.isAsyncStep = true; + setIsTestingAbruptCompletion(false); + setAsyncStep(true); assertThrows(IllegalStateException.class, () -> { beginAsync().thenRun(c -> { async(3, c); @@ -746,8 +783,8 @@ void testDerivation() { // Stand-ins for sync-async methods; these "happily" do not throw // exceptions, to avoid complicating this demo async code. 
Consumer happySync = (i) -> { - invocationTracker.getNextOption(1); - listener.add("affected-success-" + i); + getNextOption(1); + listenerAdd("affected-success-" + i); }; BiConsumer> happyAsync = (i, c) -> { happySync.accept(i); @@ -827,275 +864,4 @@ void testDerivation() { }); } - // invoked methods: - - private void plain(final int i) { - int cur = invocationTracker.getNextOption(2); - if (cur == 0) { - listener.add("plain-exception-" + i); - throw new RuntimeException("affected method exception-" + i); - } else { - listener.add("plain-success-" + i); - } - } - - private int plainReturns(final int i) { - int cur = invocationTracker.getNextOption(2); - if (cur == 0) { - listener.add("plain-exception-" + i); - throw new RuntimeException("affected method exception-" + i); - } else { - listener.add("plain-success-" + i); - return i; - } - } - - private boolean plainTest(final int i) { - int cur = invocationTracker.getNextOption(3); - if (cur == 0) { - listener.add("plain-exception-" + i); - throw new RuntimeException("affected method exception-" + i); - } else if (cur == 1) { - listener.add("plain-false-" + i); - return false; - } else { - listener.add("plain-true-" + i); - return true; - } - } - - private void sync(final int i) { - assertFalse(invocationTracker.isAsyncStep); - affected(i); - } - - - private Integer syncReturns(final int i) { - assertFalse(invocationTracker.isAsyncStep); - return affectedReturns(i); - } - - private void async(final int i, final SingleResultCallback callback) { - assertTrue(invocationTracker.isAsyncStep); - if (isTestingAbruptCompletion) { - affected(i); - callback.complete(callback); - - } else { - try { - affected(i); - callback.complete(callback); - } catch (Throwable t) { - callback.onResult(null, t); - } - } - } - - private void asyncReturns(final int i, final SingleResultCallback callback) { - assertTrue(invocationTracker.isAsyncStep); - if (isTestingAbruptCompletion) { - callback.complete(affectedReturns(i)); - } else { - try { - callback.complete(affectedReturns(i)); - } catch (Throwable t) { - callback.onResult(null, t); - } - } - } - - private void affected(final int i) { - int cur = invocationTracker.getNextOption(2); - if (cur == 0) { - listener.add("affected-exception-" + i); - throw new RuntimeException("exception-" + i); - } else { - listener.add("affected-success-" + i); - } - } - - private int affectedReturns(final int i) { - int cur = invocationTracker.getNextOption(2); - if (cur == 0) { - listener.add("affected-exception-" + i); - throw new RuntimeException("exception-" + i); - } else { - listener.add("affected-success-" + i); - return i; - } - } - - // assert methods: - - private void assertBehavesSameVariations(final int expectedVariations, final Runnable sync, - final Consumer> async) { - assertBehavesSameVariations(expectedVariations, - () -> { - sync.run(); - return null; - }, - (c) -> { - async.accept((v, e) -> c.onResult(v, e)); - }); - } - - private void assertBehavesSameVariations(final int expectedVariations, final Supplier sync, - final Consumer> async) { - // run the variation-trying code twice, with direct/indirect exceptions - for (int i = 0; i < 2; i++) { - isTestingAbruptCompletion = i != 0; - - // the variation-trying code: - invocationTracker.reset(); - do { - invocationTracker.startInitialStep(); - assertBehavesSame( - sync, - () -> invocationTracker.startMatchStep(), - async); - } while (invocationTracker.countDown()); - assertEquals(expectedVariations, invocationTracker.getVariationCount(), - "number of variations did 
not match"); - } - - } - - private void assertBehavesSame(final Supplier sync, final Runnable between, - final Consumer> async) { - - T expectedValue = null; - Throwable expectedException = null; - try { - expectedValue = sync.get(); - } catch (Throwable e) { - expectedException = e; - } - List expectedEvents = listener.getEventStrings(); - - listener.clear(); - between.run(); - - AtomicReference actualValue = new AtomicReference<>(); - AtomicReference actualException = new AtomicReference<>(); - AtomicBoolean wasCalled = new AtomicBoolean(false); - try { - async.accept((v, e) -> { - actualValue.set(v); - actualException.set(e); - if (wasCalled.get()) { - fail(); - } - wasCalled.set(true); - }); - } catch (Throwable e) { - fail("async threw instead of using callback"); - } - - // The following code can be used to debug variations: -// System.out.println("===VARIATION START"); -// System.out.println("sync: " + expectedEvents); -// System.out.println("callback called?: " + wasCalled.get()); -// System.out.println("value -- sync: " + expectedValue + " -- async: " + actualValue.get()); -// System.out.println("excep -- sync: " + expectedException + " -- async: " + actualException.get()); -// System.out.println("exception mode: " + (isTestingAbruptCompletion -// ? "exceptions thrown directly (abrupt completion)" : "exceptions into callbacks")); -// System.out.println("===VARIATION END"); - - // show assertion failures arising in async tests - if (actualException.get() != null && actualException.get() instanceof AssertionFailedError) { - throw (AssertionFailedError) actualException.get(); - } - - assertTrue(wasCalled.get(), "callback should have been called"); - assertEquals(expectedEvents, listener.getEventStrings(), "steps should have matched"); - assertEquals(expectedValue, actualValue.get()); - assertEquals(expectedException == null, actualException.get() == null, - "both or neither should have produced an exception"); - if (expectedException != null) { - assertEquals(expectedException.getMessage(), actualException.get().getMessage()); - assertEquals(expectedException.getClass(), actualException.get().getClass()); - } - - listener.clear(); - } - - /** - * Tracks invocations: allows testing of all variations of a method calls - */ - private static class InvocationTracker { - public static final int DEPTH_LIMIT = 50; - private final List invocationOptionSequence = new ArrayList<>(); - private boolean isAsyncStep; // async = matching, vs initial step = populating - private int currentInvocationIndex; - private int variationCount; - - public void reset() { - variationCount = 0; - } - - public void startInitialStep() { - variationCount++; - isAsyncStep = false; - currentInvocationIndex = -1; - } - - public int getNextOption(final int myOptionsSize) { - /* - This method creates (or gets) the next invocation's option. Each - invoker of this method has the "option" to behave in various ways, - usually just success (option 1) and exceptional failure (option 0), - though some callers might have more options. A sequence of method - outcomes (options) is one "variation". Tests automatically test - all possible variations (up to a limit, to prevent infinite loops). - - Methods generally have labels, to ensure that corresponding - sync/async methods are called in the right order, but these labels - are unrelated to the "variation" logic here. There are two "modes" - (whether completion is abrupt, or not), which are also unrelated. 
- */ - - currentInvocationIndex++; // which invocation result we are dealing with - - if (currentInvocationIndex >= invocationOptionSequence.size()) { - if (isAsyncStep) { - fail("result should have been pre-initialized: steps may not match"); - } - if (isWithinDepthLimit()) { - invocationOptionSequence.add(myOptionsSize - 1); - } else { - invocationOptionSequence.add(0); // choose "0" option, should always be an exception - } - } - return invocationOptionSequence.get(currentInvocationIndex); - } - - public void startMatchStep() { - isAsyncStep = true; - currentInvocationIndex = -1; - } - - private boolean countDown() { - while (!invocationOptionSequence.isEmpty()) { - int lastItemIndex = invocationOptionSequence.size() - 1; - int lastItem = invocationOptionSequence.get(lastItemIndex); - if (lastItem > 0) { - // count current digit down by 1, until 0 - invocationOptionSequence.set(lastItemIndex, lastItem - 1); - return true; - } else { - // current digit completed, remove (move left) - invocationOptionSequence.remove(lastItemIndex); - } - } - return false; - } - - public int getVariationCount() { - return variationCount; - } - - public boolean isWithinDepthLimit() { - return invocationOptionSequence.size() < DEPTH_LIMIT; - } - } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java new file mode 100644 index 00000000000..7cc8b456f1c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java @@ -0,0 +1,324 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.async; + +import com.mongodb.client.TestListener; +import org.opentest4j.AssertionFailedError; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class AsyncFunctionsTestAbstract { + + private final TestListener listener = new TestListener(); + private final InvocationTracker invocationTracker = new InvocationTracker(); + private boolean isTestingAbruptCompletion = false; + + void setIsTestingAbruptCompletion(final boolean b) { + isTestingAbruptCompletion = b; + } + + public void setAsyncStep(final boolean isAsyncStep) { + invocationTracker.isAsyncStep = isAsyncStep; + } + + public void getNextOption(final int i) { + invocationTracker.getNextOption(i); + } + + public void listenerAdd(final String s) { + listener.add(s); + } + + void plain(final int i) { + int cur = invocationTracker.getNextOption(2); + if (cur == 0) { + listener.add("plain-exception-" + i); + throw new RuntimeException("affected method exception-" + i); + } else { + listener.add("plain-success-" + i); + } + } + + int plainReturns(final int i) { + int cur = invocationTracker.getNextOption(2); + if (cur == 0) { + listener.add("plain-returns-exception-" + i); + throw new RuntimeException("affected method exception-" + i); + } else { + listener.add("plain-returns-success-" + i); + return i; + } + } + + boolean plainTest(final int i) { + int cur = invocationTracker.getNextOption(3); + if (cur == 0) { + listener.add("plain-exception-" + i); + throw new RuntimeException("affected method exception-" + i); + } else if (cur == 1) { + listener.add("plain-false-" + i); + return false; + } else { + listener.add("plain-true-" + i); + return true; + } + } + + void sync(final int i) { + assertFalse(invocationTracker.isAsyncStep); + affected(i); + } + + Integer syncReturns(final int i) { + assertFalse(invocationTracker.isAsyncStep); + return affectedReturns(i); + } + + void async(final int i, final SingleResultCallback callback) { + assertTrue(invocationTracker.isAsyncStep); + if (isTestingAbruptCompletion) { + affected(i); + callback.complete(callback); + + } else { + try { + affected(i); + callback.complete(callback); + } catch (Throwable t) { + callback.onResult(null, t); + } + } + } + + void asyncReturns(final int i, final SingleResultCallback callback) { + assertTrue(invocationTracker.isAsyncStep); + if (isTestingAbruptCompletion) { + callback.complete(affectedReturns(i)); + } else { + try { + callback.complete(affectedReturns(i)); + } catch (Throwable t) { + callback.onResult(null, t); + } + } + } + + private void affected(final int i) { + int cur = invocationTracker.getNextOption(2); + if (cur == 0) { + listener.add("affected-exception-" + i); + throw new RuntimeException("exception-" + i); + } else { + listener.add("affected-success-" + i); + } + } + + private int affectedReturns(final int i) { + int cur = invocationTracker.getNextOption(2); + if (cur == 0) { + listener.add("affected-returns-exception-" + i); + throw new RuntimeException("exception-" + i); + } else { + listener.add("affected-returns-success-" + i); + return i; + } + } + + // assert methods: + + void 
assertBehavesSameVariations(final int expectedVariations, final Runnable sync, + final Consumer> async) { + assertBehavesSameVariations(expectedVariations, + () -> { + sync.run(); + return null; + }, + (c) -> { + async.accept((v, e) -> c.onResult(v, e)); + }); + } + + void assertBehavesSameVariations(final int expectedVariations, final Supplier sync, + final Consumer> async) { + // run the variation-trying code twice, with direct/indirect exceptions + for (int i = 0; i < 2; i++) { + isTestingAbruptCompletion = i != 0; + + // the variation-trying code: + invocationTracker.reset(); + do { + invocationTracker.startInitialStep(); + assertBehavesSame( + sync, + () -> invocationTracker.startMatchStep(), + async); + } while (invocationTracker.countDown()); + assertEquals(expectedVariations, invocationTracker.getVariationCount(), + "number of variations did not match"); + } + + } + + private void assertBehavesSame(final Supplier sync, final Runnable between, + final Consumer> async) { + + T expectedValue = null; + Throwable expectedException = null; + try { + expectedValue = sync.get(); + } catch (Throwable e) { + expectedException = e; + } + List expectedEvents = listener.getEventStrings(); + + listener.clear(); + between.run(); + + AtomicReference actualValue = new AtomicReference<>(); + AtomicReference actualException = new AtomicReference<>(); + AtomicBoolean wasCalled = new AtomicBoolean(false); + try { + async.accept((v, e) -> { + actualValue.set(v); + actualException.set(e); + if (wasCalled.get()) { + fail(); + } + wasCalled.set(true); + }); + } catch (Throwable e) { + fail("async threw instead of using callback"); + } + + // The following code can be used to debug variations: +// System.out.println("===VARIATION START"); +// System.out.println("sync: " + expectedEvents); +// System.out.println("callback called?: " + wasCalled.get()); +// System.out.println("value -- sync: " + expectedValue + " -- async: " + actualValue.get()); +// System.out.println("excep -- sync: " + expectedException + " -- async: " + actualException.get()); +// System.out.println("exception mode: " + (isTestingAbruptCompletion +// ? 
"exceptions thrown directly (abrupt completion)" : "exceptions into callbacks")); +// System.out.println("===VARIATION END"); + + // show assertion failures arising in async tests + if (actualException.get() != null && actualException.get() instanceof AssertionFailedError) { + throw (AssertionFailedError) actualException.get(); + } + + assertTrue(wasCalled.get(), "callback should have been called"); + assertEquals(expectedEvents, listener.getEventStrings(), "steps should have matched"); + assertEquals(expectedValue, actualValue.get()); + assertEquals(expectedException == null, actualException.get() == null, + "both or neither should have produced an exception"); + if (expectedException != null) { + assertEquals(expectedException.getMessage(), actualException.get().getMessage()); + assertEquals(expectedException.getClass(), actualException.get().getClass()); + } + + listener.clear(); + } + + /** + * Tracks invocations: allows testing of all variations of a method calls + */ + static class InvocationTracker { + public static final int DEPTH_LIMIT = 50; + private final List invocationOptionSequence = new ArrayList<>(); + private boolean isAsyncStep; // async = matching, vs initial step = populating + private int currentInvocationIndex; + private int variationCount; + + public void reset() { + variationCount = 0; + } + + public void startInitialStep() { + variationCount++; + isAsyncStep = false; + currentInvocationIndex = -1; + } + + public int getNextOption(final int myOptionsSize) { + /* + This method creates (or gets) the next invocation's option. Each + invoker of this method has the "option" to behave in various ways, + usually just success (option 1) and exceptional failure (option 0), + though some callers might have more options. A sequence of method + outcomes (options) is one "variation". Tests automatically test + all possible variations (up to a limit, to prevent infinite loops). + + Methods generally have labels, to ensure that corresponding + sync/async methods are called in the right order, but these labels + are unrelated to the "variation" logic here. There are two "modes" + (whether completion is abrupt, or not), which are also unrelated. 
+ */ + + currentInvocationIndex++; // which invocation result we are dealing with + + if (currentInvocationIndex >= invocationOptionSequence.size()) { + if (isAsyncStep) { + fail("result should have been pre-initialized: steps may not match"); + } + if (isWithinDepthLimit()) { + invocationOptionSequence.add(myOptionsSize - 1); + } else { + invocationOptionSequence.add(0); // choose "0" option, should always be an exception + } + } + return invocationOptionSequence.get(currentInvocationIndex); + } + + public void startMatchStep() { + isAsyncStep = true; + currentInvocationIndex = -1; + } + + private boolean countDown() { + while (!invocationOptionSequence.isEmpty()) { + int lastItemIndex = invocationOptionSequence.size() - 1; + int lastItem = invocationOptionSequence.get(lastItemIndex); + if (lastItem > 0) { + // count current digit down by 1, until 0 + invocationOptionSequence.set(lastItemIndex, lastItem - 1); + return true; + } else { + // current digit completed, remove (move left) + invocationOptionSequence.remove(lastItemIndex); + } + } + return false; + } + + public int getVariationCount() { + return variationCount; + } + + public boolean isWithinDepthLimit() { + return invocationOptionSequence.size() < DEPTH_LIMIT; + } + } +} From f1f686b74a20aefb89c2d312856c0a801bfe216f Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Wed, 29 May 2024 17:36:47 -0600 Subject: [PATCH 20/90] OIDC admin credentials (#1389) JAVA-5450 --- .evergreen/run-mongodb-oidc-test.sh | 7 ++++++- .../functional/com/mongodb/client/unified/Entities.java | 6 ++++++ .../internal/connection/OidcAuthenticationProseTests.java | 4 +++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 1f5c1b310cc..ec2b2c19610 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -34,7 +34,12 @@ fi which java export OIDC_TESTS_ENABLED=true -./gradlew -Dorg.mongodb.test.uri="$MONGODB_URI" \ +# use admin credentials for tests +TO_REPLACE="mongodb://" +REPLACEMENT="mongodb://$OIDC_ADMIN_USER:$OIDC_ADMIN_PWD@" +ADMIN_URI=${MONGODB_URI/$TO_REPLACE/$REPLACEMENT} + +./gradlew -Dorg.mongodb.test.uri="$ADMIN_URI" \ --stacktrace --debug --info --no-build-cache driver-core:cleanTest \ driver-sync:test --tests OidcAuthenticationProseTests --tests UnifiedAuthTest \ driver-reactive-streams:test --tests OidcAuthenticationAsyncProseTests \ diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java index 76e49d68cdb..f3aef9ec257 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java @@ -18,6 +18,7 @@ import com.mongodb.ClientEncryptionSettings; import com.mongodb.ClientSessionOptions; +import com.mongodb.ConnectionString; import com.mongodb.MongoClientSettings; import com.mongodb.MongoCredential; import com.mongodb.ReadConcern; @@ -535,6 +536,11 @@ private void initClient(final BsonDocument entity, final String id, "Unsupported authMechanismProperties for authMechanism: " + value); } + // override the org.mongodb.test.uri connection string + String uri = getenv("MONGODB_URI"); + ConnectionString cs = new ConnectionString(uri); + clientSettingsBuilder.applyConnectionString(cs); + String env = assertNotNull(getenv("OIDC_ENV")); MongoCredential oidcCredential = MongoCredential .createOidcCredential(null) diff --git 
a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java index 01d530e9e20..70ab06a08b1 100644 --- a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java +++ b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java @@ -234,6 +234,8 @@ public void test2p4InvalidClientConfigurationWithCallback() { @Test public void test2p5InvalidAllowedHosts() { + assumeTestEnvironment(); + String uri = "mongodb://localhost/?authMechanism=MONGODB-OIDC&&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:123"; ConnectionString cs = new ConnectionString(uri); MongoCredential credential = assertNotNull(cs.getCredential()) @@ -245,7 +247,7 @@ public void test2p5InvalidAllowedHosts() { .credential(credential) .build(); assertCause(IllegalArgumentException.class, - "ALLOWED_HOSTS must not be specified only when OIDC_HUMAN_CALLBACK is specified", + "ALLOWED_HOSTS must be specified only when OIDC_HUMAN_CALLBACK is specified", () -> { try (MongoClient mongoClient = createMongoClient(settings)) { performFind(mongoClient); From 46fda2f3866191d7d4097b651638951b336633ef Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Thu, 30 May 2024 12:48:20 -0400 Subject: [PATCH 21/90] Remove legacy shell from test scripts (#1404) The legacy shell was only used in AWS authentication tests, so updating those gets rid of the last remaining use of the legacy shell. JAVA-4791 --- .evergreen/.evg.yml | 202 +++++------------------------ .evergreen/run-mongodb-aws-test.sh | 15 +-- 2 files changed, 33 insertions(+), 184 deletions(-) diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 37b67c6e1e5..9f614abfec2 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -154,7 +154,7 @@ functions: ${PREPARE_SHELL} REQUIRE_API_VERSION=${REQUIRE_API_VERSION} LOAD_BALANCER=${LOAD_BALANCER} MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} \ AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ - INSTALL_LEGACY_SHELL=${INSTALL_LEGACY_SHELL} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update params: @@ -346,241 +346,108 @@ functions: JAVA_VERSION="8" MONGODB_URI="${plain_auth_mongodb_uri}" .evergreen/run-plain-auth-test.sh "add aws auth variables to file": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} - command: shell.exec type: test params: + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + shell: "bash" working_dir: "src" - silent: true script: | - cat < ${DRIVERS_TOOLS}/.evergreen/auth_aws/aws_e2e_setup.json - { - "iam_auth_ecs_account" : "${iam_auth_ecs_account}", - "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}", - "iam_auth_ecs_account_arn": "arn:aws:iam::557821124784:user/authtest_fargate_user", - "iam_auth_ecs_cluster": "${iam_auth_ecs_cluster}", - "iam_auth_ecs_task_definition": "${iam_auth_ecs_task_definition}", - "iam_auth_ecs_subnet_a": "${iam_auth_ecs_subnet_a}", - "iam_auth_ecs_subnet_b": "${iam_auth_ecs_subnet_b}", - "iam_auth_ecs_security_group": "${iam_auth_ecs_security_group}", - - "iam_auth_assume_aws_account" : "${iam_auth_assume_aws_account}", - 
"iam_auth_assume_aws_secret_access_key" : "${iam_auth_assume_aws_secret_access_key}", - "iam_auth_assume_role_name" : "${iam_auth_assume_role_name}", - - "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}", - "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}", - "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}", - - "iam_auth_assume_web_role_name": "${iam_auth_assume_web_role_name}", - "iam_web_identity_issuer": "${iam_web_identity_issuer}", - "iam_web_identity_rsa_key": "${iam_web_identity_rsa_key}", - "iam_web_identity_jwks_uri": "${iam_web_identity_jwks_uri}", - "iam_web_identity_token_file": "${iam_web_identity_token_file}" - } - EOF + ${PREPARE_SHELL} + cd $DRIVERS_TOOLS/.evergreen/auth_aws + ./setup_secrets.sh drivers/aws_auth "run aws auth test with regular aws credentials": - command: shell.exec type: test params: - working_dir: "src" shell: "bash" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate-authawsvenv.sh - mongo aws_e2e_regular_aws.js - - command: shell.exec - type: test - params: working_dir: "src" - silent: true script: | - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' - USER=$(urlencode ${iam_auth_ecs_account}) - PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) - MONGODB_URI="mongodb://$USER:$PASS@localhost" - EOF - JAVA_VERSION=${JAVA_VERSION} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} \ - AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} \ - .evergreen/run-mongodb-aws-test.sh + ${PREPARE_SHELL} + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh regular "run aws auth test with assume role credentials": - command: shell.exec type: test params: - working_dir: "src" shell: "bash" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate-authawsvenv.sh - mongo aws_e2e_assume_role.js - - command: shell.exec - type: test - params: working_dir: "src" - silent: true script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' - USER=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - USER=$(urlencode $USER) - PASS=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - PASS=$(urlencode $PASS) - SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - SESSION_TOKEN=$(urlencode $SESSION_TOKEN) - MONGODB_URI="mongodb://$USER:$PASS@localhost" - EOF - JAVA_VERSION=${JAVA_VERSION} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} DRIVERS_TOOLS=${DRIVERS_TOOLS} \ - AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} \ - .evergreen/run-mongodb-aws-test.sh + ${PREPARE_SHELL} + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh assume-role "run aws auth test with aws EC2 credentials": - command: shell.exec type: test params: - working_dir: "src" shell: "bash" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate-authawsvenv.sh - mongo aws_e2e_ec2.js - - command: shell.exec - type: test - params: working_dir: "src" - shell: "bash" script: | ${PREPARE_SHELL} - # Write an empty prepare_mongodb_aws so no auth environment variables are set. 
- echo "" > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh + if [ "${SKIP_EC2_AUTH_TEST}" = "true" ]; then + echo "This platform does not support the EC2 auth test, skipping..." + exit 0 + fi + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh ec2 "run aws auth test with web identity credentials": - command: shell.exec type: test params: - working_dir: "src" - shell: "bash" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate-authawsvenv.sh - mongo aws_e2e_web_identity.js - - command: shell.exec - type: test - params: - working_dir: "src" shell: "bash" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - export AWS_ROLE_ARN="${iam_auth_assume_web_role_name}" - export AWS_WEB_IDENTITY_TOKEN_FILE="${iam_web_identity_token_file}" - EOF - - command: shell.exec - type: test - params: working_dir: "src" - shell: "bash" script: | ${PREPARE_SHELL} if [ "${AWS_CREDENTIAL_PROVIDER}" = "builtIn" ]; then echo "Built-in AWS credential provider does not support the web identity auth test, skipping..." exit 0 fi - JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + if [ "${SKIP_WEB_IDENTITY_AUTH_TEST}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh web-identity - command: shell.exec type: test params: - working_dir: "src" shell: "bash" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - export AWS_ROLE_ARN="${iam_auth_assume_web_role_name}" - export AWS_WEB_IDENTITY_TOKEN_FILE="${iam_web_identity_token_file}" - export AWS_ROLE_SESSION_NAME="test" - EOF - - command: shell.exec - type: test - params: working_dir: "src" - shell: "bash" script: | ${PREPARE_SHELL} if [ "${AWS_CREDENTIAL_PROVIDER}" = "builtIn" ]; then echo "Built-in AWS credential provider does not support the web identity auth test, skipping..." exit 0 fi - JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + if [ "${SKIP_WEB_IDENTITY_AUTH_TEST}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + export AWS_ROLE_SESSION_NAME="test" + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh web-identity "run aws auth test with aws credentials as environment variables": - command: shell.exec type: test params: - working_dir: "src" shell: "bash" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . 
./activate-authawsvenv.sh - mongo aws_e2e_regular_aws.js - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - export AWS_ACCESS_KEY_ID=${iam_auth_ecs_account} - export AWS_SECRET_ACCESS_KEY=${iam_auth_ecs_secret_access_key} - EOF - - command: shell.exec - type: test - params: working_dir: "src" script: | ${PREPARE_SHELL} - JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh env-creds "run aws auth test with aws credentials and session token as environment variables": - command: shell.exec type: test params: - working_dir: "src" shell: "bash" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate-authawsvenv.sh - mongo aws_e2e_assume_role.js - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - export AWS_ACCESS_KEY_ID=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - export AWS_SECRET_ACCESS_KEY=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - export AWS_SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - EOF - - command: shell.exec - type: test - params: working_dir: "src" script: | ${PREPARE_SHELL} - JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh + JAVA_VERSION=${JAVA_VERSION} AWS_CREDENTIAL_PROVIDER=${AWS_CREDENTIAL_PROVIDER} .evergreen/run-mongodb-aws-test.sh session-creds "run aws ECS auth test": - command: shell.exec @@ -1063,7 +930,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws auth test with regular aws credentials" @@ -1074,7 +940,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws auth test with assume role credentials" @@ -1085,7 +950,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws auth test with aws credentials as environment variables" @@ -1096,7 +960,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws auth test with aws credentials and session token as environment variables" @@ -1107,7 +970,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws auth test with aws EC2 credentials" @@ -1118,7 +980,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws auth test with web identity credentials" @@ -1129,7 +990,6 @@ tasks: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - INSTALL_LEGACY_SHELL: "true" - func: "add aws auth variables to file" - func: "run aws ECS auth test" diff --git 
a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index ff20ded9936..45c36227a63 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -15,19 +15,8 @@ RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" echo "Running MONGODB-AWS authentication tests" - -# ensure no secrets are printed in log files -set +x - -# load the script -shopt -s expand_aliases # needed for `urlencode` alias -[ -s "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" ] && source "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - -MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} -MONGODB_URI="${MONGODB_URI}/aws?authMechanism=MONGODB-AWS" -if [[ -n ${SESSION_TOKEN} ]]; then - MONGODB_URI="${MONGODB_URI}&authMechanismProperties=AWS_SESSION_TOKEN:${SESSION_TOKEN}" -fi +# Handle credentials and environment setup. +. $DRIVERS_TOOLS/.evergreen/auth_aws/aws_setup.sh $1 # show test output set -x From 339bd2cabe4baa7a624588978400a0777d257e9b Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 3 Jun 2024 10:10:33 -0600 Subject: [PATCH 22/90] Create and upload `ssdlc_compliance_report.md` (#1405) JAVA-5435 --- .evergreen/.evg.yml | 66 +++++++++++++---- .evergreen/ssdlc-report.sh | 63 +++++++++++++++-- .../template_ssdlc_compliance_report.md | 70 +++++++++++++++++++ 3 files changed, 179 insertions(+), 20 deletions(-) create mode 100644 .evergreen/template_ssdlc_compliance_report.md diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 9f614abfec2..c0bceb90c70 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -142,6 +142,45 @@ functions: content_type: ${content_type|text/plain} display_name: "orchestration.log" + "create and upload SSDLC release assets": + - command: shell.exec + shell: "bash" + params: + working_dir: "src" + env: + PRODUCT_NAME: ${product_name} + PRODUCT_VERSION: ${product_version} + script: .evergreen/ssdlc-report.sh + - command: ec2.assume_role + params: + role_arn: ${UPLOAD_SSDLC_RELEASE_ASSETS_ROLE_ARN} + - command: s3.put + params: + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + local_file: ./src/build/ssdlc/ssdlc_compliance_report.md + remote_file: ${product_name}/${product_version}/ssdlc_compliance_report.md + bucket: java-driver-release-assets + region: us-west-1 + permissions: private + content_type: text/markdown + display_name: ssdlc_compliance_report.md + - command: s3.put + params: + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + local_files_include_filter: + - build/ssdlc/static-analysis-reports/*.sarif + local_files_include_filter_prefix: ./src/ + remote_file: ${product_name}/${product_version}/static-analysis-reports/ + bucket: java-driver-release-assets + region: us-west-1 + permissions: private + content_type: application/sarif+json + display_name: + "upload test results": - command: attach.xunit_results params: @@ -692,24 +731,21 @@ functions: params: working_dir: "src" script: | - tag=$(git describe --tags --always --dirty) - - # remove the leading 'r' - version=$(echo -n "$tag" | cut -c 2-) - - cat < trace-expansions.yml - release_version: "$version" - EOT - cat trace-expansions.yml + PRODUCT_VERSION="$(echo -n "$(git describe --tags --always --dirty)" | cut -c 2-)" + cat > ssdlc-expansions.yml <. + + + + + + + + + + + + + + +
    Product name${product_name}
    Product version${product_version}
    Report date, UTC${report_date_utc}
    + +## Release creator + +This information is available in multiple ways: + + + + + + + + + + +
    Evergreen + Go to + + https://evergreen.mongodb.com/waterfall/mongo-java-driver?bv_filter=Publish%20Release, + find the build triggered from Git tag r${product_version}, see who authored it. +
    Papertrail + Refer to data in Papertrail. There is currently no official way to serve that data. +
    + +## Process document + +Blocked on . + +The MongoDB SSDLC policy is available at +. + +## Third-darty dependency information + +There are no dependencies to report vulnerabilities of. +Our [SBOM](https://docs.devprod.prod.corp.mongodb.com/mms/python/src/sbom/silkbomb/docs/CYCLONEDX/) lite +is . + +## Static analysis findings + +The static analysis findings are all available at +. +All the findings in the aforementioned reports +are either of the MongoDB status "False Positive" or "No Fix Needed", +because code that has any other findings cannot technically get into the product. + + may also be of interest. + +## Signature information + +The product artifacts are signed. +The signatures can be verified by following instructions at +. From da30e52b8ed256ad965f4da68551c778babfd73a Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Wed, 5 Jun 2024 05:55:03 -0700 Subject: [PATCH 23/90] Optimize GridFS throughput by removing redundant byte array cloning. (#1402) JAVA-5485 Co-authored-by: Ross Lawley Co-authored-by: Jeff Yemin --- .../client/gridfs/GridFSBucketImpl.java | 8 +- .../gridfs/GridFSDownloadStreamImpl.java | 35 ++++---- .../client/gridfs/GridFSUploadStreamImpl.java | 16 ++-- .../gridfs/GridFSBucketSpecification.groovy | 33 +++++--- .../GridFSDownloadStreamSpecification.groovy | 80 ++++++++++++------- .../GridFSUploadStreamSpecification.groovy | 15 ++-- 6 files changed, 108 insertions(+), 79 deletions(-) diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java index f365bd2980a..963093af6f7 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java @@ -57,7 +57,7 @@ final class GridFSBucketImpl implements GridFSBucket { private final String bucketName; private final int chunkSizeBytes; private final MongoCollection filesCollection; - private final MongoCollection chunksCollection; + private final MongoCollection chunksCollection; private volatile boolean checkedIndexes; GridFSBucketImpl(final MongoDatabase database) { @@ -71,7 +71,7 @@ final class GridFSBucketImpl implements GridFSBucket { } GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection filesCollection, - final MongoCollection chunksCollection) { + final MongoCollection chunksCollection) { this.bucketName = notNull("bucketName", bucketName); this.chunkSizeBytes = chunkSizeBytes; this.filesCollection = notNull("filesCollection", filesCollection); @@ -459,8 +459,8 @@ private static MongoCollection getFilesCollection(final MongoDatabas ); } - private static MongoCollection getChunksCollection(final MongoDatabase database, final String bucketName) { - return database.getCollection(bucketName + ".chunks").withCodecRegistry(MongoClientSettings.getDefaultCodecRegistry()); + private static MongoCollection getChunksCollection(final MongoDatabase database, final String bucketName) { + return database.getCollection(bucketName + ".chunks", BsonDocument.class).withCodecRegistry(MongoClientSettings.getDefaultCodecRegistry()); } private void checkCreateIndex(@Nullable final ClientSession clientSession) { diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java index 16f0bcd7fd3..c9f6607d144 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java +++ 
b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java @@ -23,9 +23,10 @@ import com.mongodb.client.MongoCursor; import com.mongodb.client.gridfs.model.GridFSFile; import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; import org.bson.BsonValue; -import org.bson.Document; -import org.bson.types.Binary; import java.util.concurrent.locks.ReentrantLock; @@ -37,12 +38,12 @@ class GridFSDownloadStreamImpl extends GridFSDownloadStream { private final ClientSession clientSession; private final GridFSFile fileInfo; - private final MongoCollection chunksCollection; + private final MongoCollection chunksCollection; private final BsonValue fileId; private final long length; private final int chunkSizeInBytes; private final int numberOfChunks; - private MongoCursor cursor; + private MongoCursor cursor; private int batchSize; private int chunkIndex; private int bufferOffset; @@ -55,10 +56,10 @@ class GridFSDownloadStreamImpl extends GridFSDownloadStream { private boolean closed = false; GridFSDownloadStreamImpl(@Nullable final ClientSession clientSession, final GridFSFile fileInfo, - final MongoCollection chunksCollection) { + final MongoCollection chunksCollection) { this.clientSession = clientSession; this.fileInfo = notNull("file information", fileInfo); - this.chunksCollection = notNull("chunks collection", chunksCollection); + this.chunksCollection = notNull("chunks collection", chunksCollection); fileId = fileInfo.getId(); length = fileInfo.getLength(); @@ -213,17 +214,17 @@ private void discardCursor() { } @Nullable - private Document getChunk(final int startChunkIndex) { + private BsonDocument getChunk(final int startChunkIndex) { if (cursor == null) { cursor = getCursor(startChunkIndex); } - Document chunk = null; + BsonDocument chunk = null; if (cursor.hasNext()) { chunk = cursor.next(); if (batchSize == 1) { discardCursor(); } - if (chunk.getInteger("n") != startChunkIndex) { + if (chunk.getInt32("n").getValue() != startChunkIndex) { throw new MongoGridFSException(format("Could not find file chunk for file_id: %s at chunk index %s.", fileId, startChunkIndex)); } @@ -232,28 +233,28 @@ private Document getChunk(final int startChunkIndex) { return chunk; } - private MongoCursor getCursor(final int startChunkIndex) { - FindIterable findIterable; - Document filter = new Document("files_id", fileId).append("n", new Document("$gte", startChunkIndex)); + private MongoCursor getCursor(final int startChunkIndex) { + FindIterable findIterable; + BsonDocument filter = new BsonDocument("files_id", fileId).append("n", new BsonDocument("$gte", new BsonInt32(startChunkIndex))); if (clientSession != null) { findIterable = chunksCollection.find(clientSession, filter); } else { findIterable = chunksCollection.find(filter); } - return findIterable.batchSize(batchSize).sort(new Document("n", 1)).iterator(); + return findIterable.batchSize(batchSize).sort(new BsonDocument("n", new BsonInt32(1))).iterator(); } - private byte[] getBufferFromChunk(@Nullable final Document chunk, final int expectedChunkIndex) { + private byte[] getBufferFromChunk(@Nullable final BsonDocument chunk, final int expectedChunkIndex) { - if (chunk == null || chunk.getInteger("n") != expectedChunkIndex) { + if (chunk == null || chunk.getInt32("n").getValue() != expectedChunkIndex) { throw new MongoGridFSException(format("Could not find file chunk for file_id: %s at chunk index %s.", fileId, expectedChunkIndex)); } - if (!(chunk.get("data") instanceof 
Binary)) { + if (!(chunk.get("data") instanceof BsonBinary)) { throw new MongoGridFSException("Unexpected data format for the chunk"); } - byte[] data = chunk.get("data", Binary.class).getData(); + byte[] data = chunk.getBinary("data").getData(); long expectedDataLength = 0; boolean extraChunk = false; diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java index ff359e34781..26ef5f85934 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java @@ -21,9 +21,11 @@ import com.mongodb.client.MongoCollection; import com.mongodb.client.gridfs.model.GridFSFile; import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; import org.bson.BsonValue; import org.bson.Document; -import org.bson.types.Binary; import org.bson.types.ObjectId; import java.util.Date; @@ -35,7 +37,7 @@ final class GridFSUploadStreamImpl extends GridFSUploadStream { private final ClientSession clientSession; private final MongoCollection filesCollection; - private final MongoCollection chunksCollection; + private final MongoCollection chunksCollection; private final BsonValue fileId; private final String filename; private final int chunkSizeBytes; @@ -49,7 +51,7 @@ final class GridFSUploadStreamImpl extends GridFSUploadStream { private boolean closed = false; GridFSUploadStreamImpl(@Nullable final ClientSession clientSession, final MongoCollection filesCollection, - final MongoCollection chunksCollection, final BsonValue fileId, final String filename, + final MongoCollection chunksCollection, final BsonValue fileId, final String filename, final int chunkSizeBytes, @Nullable final Document metadata) { this.clientSession = clientSession; this.filesCollection = notNull("files collection", filesCollection); @@ -160,23 +162,23 @@ public void close() { private void writeChunk() { if (bufferOffset > 0) { if (clientSession != null) { - chunksCollection.insertOne(clientSession, new Document("files_id", fileId).append("n", chunkIndex) + chunksCollection.insertOne(clientSession, new BsonDocument("files_id", fileId).append("n", new BsonInt32(chunkIndex)) .append("data", getData())); } else { - chunksCollection.insertOne(new Document("files_id", fileId).append("n", chunkIndex).append("data", getData())); + chunksCollection.insertOne(new BsonDocument("files_id", fileId).append("n", new BsonInt32(chunkIndex)).append("data", getData())); } chunkIndex++; bufferOffset = 0; } } - private Binary getData() { + private BsonBinary getData() { if (bufferOffset < chunkSizeBytes) { byte[] sizedBuffer = new byte[bufferOffset]; System.arraycopy(buffer, 0, sizedBuffer, 0, bufferOffset); buffer = sizedBuffer; } - return new Binary(buffer); + return new BsonBinary(buffer); } private void checkClosed() { diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy index 32c03ce2bbc..7ae3e568bf4 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy @@ -35,12 +35,13 @@ import com.mongodb.client.result.DeleteResult import com.mongodb.client.result.UpdateResult import com.mongodb.internal.operation.BatchCursor import 
com.mongodb.internal.operation.FindOperation +import org.bson.BsonBinary import org.bson.BsonDocument +import org.bson.BsonInt32 import org.bson.BsonObjectId import org.bson.BsonString import org.bson.Document import org.bson.codecs.DocumentCodecProvider -import org.bson.types.Binary import org.bson.types.ObjectId import spock.lang.Specification import spock.lang.Unroll @@ -327,7 +328,9 @@ class GridFSBucketSpecification extends Specification { def findIterable = Mock(FindIterable) def filesCollection = Mock(MongoCollection) def tenBytes = new byte[10] - def chunkDocument = new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(tenBytes)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(tenBytes)) def chunksCollection = Mock(MongoCollection) def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) def outputStream = new ByteArrayOutputStream(10) @@ -346,7 +349,7 @@ class GridFSBucketSpecification extends Specification { } else { 1 * filesCollection.find() >> findIterable } - 1 * findIterable.filter(new Document('_id', bsonFileId)) >> findIterable + 1 * findIterable.filter(new BsonDocument('_id', bsonFileId)) >> findIterable 1 * findIterable.first() >> fileInfo then: @@ -376,7 +379,9 @@ class GridFSBucketSpecification extends Specification { def findIterable = Mock(FindIterable) def filesCollection = Mock(MongoCollection) def tenBytes = new byte[10] - def chunkDocument = new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(tenBytes)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(tenBytes)) def chunksCollection = Mock(MongoCollection) def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) def outputStream = new ByteArrayOutputStream(10) @@ -395,7 +400,7 @@ class GridFSBucketSpecification extends Specification { } else { 1 * filesCollection.find() >> findIterable } - 1 * findIterable.filter(new Document('_id', bsonFileId)) >> findIterable + 1 * findIterable.filter(new BsonDocument('_id', bsonFileId)) >> findIterable 1 * findIterable.first() >> fileInfo then: @@ -424,11 +429,13 @@ class GridFSBucketSpecification extends Specification { def bsonFileId = new BsonObjectId(fileId) def fileInfo = new GridFSFile(bsonFileId, filename, 10, 255, new Date(), new Document()) def mongoCursor = Mock(MongoCursor) - def findIterable = Mock(FindIterable) + def gridFsFileFindIterable = Mock(FindIterable) def findChunkIterable = Mock(FindIterable) def filesCollection = Mock(MongoCollection) def tenBytes = new byte[10] - def chunkDocument = new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(tenBytes)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(tenBytes)) def chunksCollection = Mock(MongoCollection) def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) def outputStream = new ByteArrayOutputStream(10) @@ -443,14 +450,14 @@ class GridFSBucketSpecification extends Specification { then: if (clientSession != null) { - 1 * filesCollection.find(clientSession) >> findIterable + 1 * filesCollection.find(clientSession) >> gridFsFileFindIterable } else { - 1 * filesCollection.find() >> findIterable + 1 * filesCollection.find() >> gridFsFileFindIterable } - 1 * 
findIterable.filter(new Document('filename', filename)) >> findIterable - 1 * findIterable.skip(_) >> findIterable - 1 * findIterable.sort(_) >> findIterable - 1 * findIterable.first() >> fileInfo + 1 * gridFsFileFindIterable.filter(new Document('filename', filename)) >> gridFsFileFindIterable + 1 * gridFsFileFindIterable.skip(_) >> gridFsFileFindIterable + 1 * gridFsFileFindIterable.sort(_) >> gridFsFileFindIterable + 1 * gridFsFileFindIterable.first() >> fileInfo if (clientSession != null) { 1 * chunksCollection.find(clientSession, _) >> findChunkIterable diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy index 99e8f7a2167..d39ee094230 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy @@ -22,9 +22,11 @@ import com.mongodb.client.FindIterable import com.mongodb.client.MongoCollection import com.mongodb.client.MongoCursor import com.mongodb.client.gridfs.model.GridFSFile +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonInt32 import org.bson.BsonObjectId import org.bson.Document -import org.bson.types.Binary import org.bson.types.ObjectId import spock.lang.Specification @@ -43,15 +45,16 @@ class GridFSDownloadStreamSpecification extends Specification { when: def twoBytes = new byte[2] def oneByte = new byte[1] - def findQuery = new Document('files_id', fileInfo.getId()).append('n', new Document('$gte', 0)) - def sort = new Document('n', 1) - def chunkDocument = new Document('files_id', fileInfo.getId()) - .append('n', 0) - .append('data', new Binary(twoBytes)) + def findQuery = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(0))) + def sort = new BsonDocument('n', new BsonInt32(1)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(twoBytes)) - def secondChunkDocument = new Document('files_id', fileInfo.getId()) - .append('n', 1) - .append('data', new Binary(oneByte)) + def secondChunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(1)) + .append('data', new BsonBinary(oneByte)) def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) @@ -112,16 +115,19 @@ class GridFSDownloadStreamSpecification extends Specification { when: def twoBytes = new byte[2] def oneByte = new byte[1] - def findQuery = new Document('files_id', fileInfo.getId()).append('n', new Document('$gte', 0)) - def secondFindQuery = new Document('files_id', fileInfo.getId()).append('n', new Document('$gte', 1)) - def sort = new Document('n', 1) - def chunkDocument = new Document('files_id', fileInfo.getId()) - .append('n', 0) - .append('data', new Binary(twoBytes)) - - def secondChunkDocument = new Document('files_id', fileInfo.getId()) - .append('n', 1) - .append('data', new Binary(oneByte)) + def findQuery = new BsonDocument('files_id', fileInfo.getId()).append('n', + new BsonDocument('$gte', + new BsonInt32(0))) + def secondFindQuery = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(1))) + def sort = new BsonDocument('n', new BsonInt32(1)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + 
.append('data', new BsonBinary(twoBytes)) + + def secondChunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(1)) + .append('data', new BsonBinary(oneByte)) def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) @@ -194,13 +200,17 @@ class GridFSDownloadStreamSpecification extends Specification { def firstChunkBytes = 1..32 as byte[] def lastChunkBytes = 33 .. 57 as byte[] - def sort = new Document('n', 1) + def sort = new BsonDocument('n', new BsonInt32(1)) - def findQueries = [new Document('files_id', fileInfo.getId()).append('n', new Document('$gte', 0)), - new Document('files_id', fileInfo.getId()).append('n', new Document('$gte', 131071))] + def findQueries = [new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(0))), + new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(131071)))] def chunkDocuments = - [new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(firstChunkBytes)), - new Document('files_id', fileInfo.getId()).append('n', 131071).append('data', new Binary(lastChunkBytes))] + [new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)).append('data', new BsonBinary(firstChunkBytes)), + new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(131071)).append('data', new BsonBinary(lastChunkBytes))] def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) @@ -276,7 +286,9 @@ class GridFSDownloadStreamSpecification extends Specification { def expected10Bytes = 11 .. 20 as byte[] def firstChunkBytes = 1..25 as byte[] - def chunkDocument = new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(firstChunkBytes)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(firstChunkBytes)) def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) @@ -340,8 +352,12 @@ class GridFSDownloadStreamSpecification extends Specification { def secondChunkBytes = 26 .. 
50 as byte[] def chunkDocuments = - [new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(firstChunkBytes)), - new Document('files_id', fileInfo.getId()).append('n', 1).append('data', new Binary(secondChunkBytes))] + [new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(firstChunkBytes)), + new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(1)) + .append('data', new BsonBinary(secondChunkBytes))] def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) @@ -416,7 +432,9 @@ class GridFSDownloadStreamSpecification extends Specification { def fileInfo = new GridFSFile(new BsonObjectId(new ObjectId()), 'filename', 25L, 25, new Date(), new Document()) def chunkBytes = 1..25 as byte[] - def chunkDocument = new Document('files_id', fileInfo.getId()).append('n', 0).append('data', new Binary(chunkBytes)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(chunkBytes)) def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) @@ -584,9 +602,9 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should throw if chunk data differs from the expected'() { given: - def chunkDocument = new Document('files_id', fileInfo.getId()) - .append('n', 0) - .append('data', new Binary(data)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(data)) def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy index cff602d6f9a..e3df2c225e1 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy @@ -20,10 +20,11 @@ import com.mongodb.MongoGridFSException import com.mongodb.client.ClientSession import com.mongodb.client.MongoCollection import com.mongodb.client.gridfs.model.GridFSFile +import org.bson.BsonDocument +import org.bson.BsonInt32 import org.bson.BsonObjectId import org.bson.BsonString import org.bson.Document -import org.bson.types.Binary import spock.lang.Specification class GridFSUploadStreamSpecification extends Specification { @@ -110,18 +111,18 @@ class GridFSUploadStreamSpecification extends Specification { then: if (clientSession != null) { 1 * chunksCollection.insertOne(clientSession) { - verifyAll(it, Document) { + verifyAll(it, BsonDocument) { it.get('files_id') == filesId - it.getInteger('n') == 0 - it.get('data', Binary).getData() == content + it.getInt32('n') == new BsonInt32(0) + it.getBinary('data').getData() == content } } } else { 1 * chunksCollection.insertOne { - verifyAll(it, Document) { + verifyAll(it, BsonDocument) { it.get('files_id') == filesId - it.getInteger('n') == 0 - it.get('data', Binary).getData() == content + it.getInt32('n') == new BsonInt32(0) + it.getBinary('data').getData() == content } } } From 68b54b2f438aec1ebf1b1f2eca37f98333d5b8ea Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Fri, 7 Jun 2024 14:52:37 -0600 Subject: [PATCH 24/90] Disallow comma character in authMechanismProperties (#1408) JAVA-5486 --- .../main/com/mongodb/ConnectionString.java | 39 ++----------------- 
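For the GridFS change in the previous patch: reading chunks as BsonDocument lets the download path take the chunk payload from a BsonBinary, whose getData() hands back the stored byte array, whereas the old Document/org.bson.types.Binary path returned a copy for every chunk. A minimal sketch of that access pattern, assuming the standard fs.chunks layout (the URI, database and files_id below are placeholders, not code from the patch):

```java
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.BsonObjectId;
import org.bson.types.ObjectId;

public final class ChunkReadSketch {
    public static void main(final String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost")) {
            // Decoding chunks straight to BsonDocument keeps the payload as BsonBinary,
            // so getData() returns the stored byte[] without an extra copy.
            MongoCollection<BsonDocument> chunks = client.getDatabase("test")
                    .getCollection("fs.chunks", BsonDocument.class);
            BsonObjectId filesId = new BsonObjectId(new ObjectId()); // placeholder files_id
            try (MongoCursor<BsonDocument> cursor = chunks
                    .find(new BsonDocument("files_id", filesId))
                    .sort(new BsonDocument("n", new BsonInt32(1)))
                    .iterator()) {
                while (cursor.hasNext()) {
                    BsonDocument chunk = cursor.next();
                    int n = chunk.getInt32("n").getValue();
                    byte[] data = chunk.getBinary("data").getData();
                    System.out.println("chunk " + n + ": " + data.length + " bytes");
                }
            }
        }
    }
}
```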
.../src/main/com/mongodb/MongoCredential.java | 3 ++ .../auth/legacy/connection-string.json | 4 +- .../connection-string/valid-options.json | 19 +++++++++ .../com/mongodb/ConnectionStringUnitTest.java | 14 ------- .../OidcAuthenticationProseTests.java | 2 +- 6 files changed, 28 insertions(+), 53 deletions(-) diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java index 34378d4069f..c4e50d88020 100644 --- a/driver-core/src/main/com/mongodb/ConnectionString.java +++ b/driver-core/src/main/com/mongodb/ConnectionString.java @@ -38,7 +38,6 @@ import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -240,9 +239,7 @@ * mechanism (the default). * *
  • {@code authMechanismProperties=PROPERTY_NAME:PROPERTY_VALUE,PROPERTY_NAME2:PROPERTY_VALUE2}: This option allows authentication - * mechanism properties to be set on the connection string. Property values must be percent-encoded individually, when - * special characters are used, including {@code ,} (comma), {@code =}, {@code +}, {@code &}, and {@code %}. The - * entire substring following the {@code =} should not itself be encoded. + * mechanism properties to be set on the connection string. *
  • *
  • {@code gssapiServiceName=string}: This option only applies to the GSSAPI mechanism and is used to alter the service name. * Deprecated, please use {@code authMechanismProperties=SERVICE_NAME:string} instead. @@ -908,7 +905,6 @@ private MongoCredential createCredentials(final Map> option } } - MongoCredential credential = null; if (mechanism != null) { credential = createMongoCredentialWithMechanism(mechanism, userName, password, authSource, gssapiServiceName); @@ -926,9 +922,6 @@ private MongoCredential createCredentials(final Map> option } String key = mechanismPropertyKeyValue[0].trim().toLowerCase(); String value = mechanismPropertyKeyValue[1].trim(); - if (decodeValueOfKeyValuePair(credential.getMechanism())) { - value = urldecode(value); - } if (MECHANISM_KEYS_DISALLOWED_IN_CONNECTION_STRING.contains(key)) { throw new IllegalArgumentException(format("The connection string contains disallowed mechanism properties. " + "'%s' must be set on the credential programmatically.", key)); @@ -944,27 +937,6 @@ private MongoCredential createCredentials(final Map> option return credential; } - private static boolean decodeWholeOptionValue(final boolean isOidc, final String key) { - // The "whole option value" is the entire string following = in an option, - // including separators when the value is a list or list of key-values. - // This is the original parsing behaviour, but implies that users can - // encode separators (much like they might with URL parameters). This - // behaviour implies that users cannot encode "key-value" values that - // contain a comma, because this will (after this "whole value decoding) - // be parsed as a key-value separator, rather than part of a value. - return !(isOidc && key.equals("authmechanismproperties")); - } - - private static boolean decodeValueOfKeyValuePair(@Nullable final String mechanismName) { - // Only authMechanismProperties should be individually decoded, and only - // when the mechanism is OIDC. These will not have been decoded. - return AuthenticationMechanism.MONGODB_OIDC.getMechanismName().equals(mechanismName); - } - - private static boolean isOidc(final List options) { - return options.contains("authMechanism=" + AuthenticationMechanism.MONGODB_OIDC.getMechanismName()); - } - private MongoCredential createMongoCredentialWithMechanism(final AuthenticationMechanism mechanism, final String userName, @Nullable final char[] password, @Nullable final String authSource, @@ -1049,9 +1021,7 @@ private Map> parseOptions(final String optionsPart) { return optionsMap; } - List options = Arrays.asList(optionsPart.split("&|;")); - boolean isOidc = isOidc(options); - for (final String part : options) { + for (final String part : optionsPart.split("&|;")) { if (part.isEmpty()) { continue; } @@ -1063,10 +1033,7 @@ private Map> parseOptions(final String optionsPart) { if (valueList == null) { valueList = new ArrayList<>(1); } - if (decodeWholeOptionValue(isOidc, key)) { - value = urldecode(value); - } - valueList.add(value); + valueList.add(urldecode(value)); optionsMap.put(key, valueList); } else { throw new IllegalArgumentException(format("The connection string contains an invalid option '%s'. 
" diff --git a/driver-core/src/main/com/mongodb/MongoCredential.java b/driver-core/src/main/com/mongodb/MongoCredential.java index e085ac074f0..8f731027cf4 100644 --- a/driver-core/src/main/com/mongodb/MongoCredential.java +++ b/driver-core/src/main/com/mongodb/MongoCredential.java @@ -267,6 +267,9 @@ public final class MongoCredential { * Mechanism property key for specifying he URI of the target resource (sometimes called the audience), * used in some OIDC environments. * + *

    A TOKEN_RESOURCE with a comma character must be given as a `MongoClient` configuration and not as + * part of the connection string. The TOKEN_RESOURCE value can contain a colon character. + * * @see MongoCredential#ENVIRONMENT_KEY * @see #createOidcCredential(String) * @since 5.1 diff --git a/driver-core/src/test/resources/auth/legacy/connection-string.json b/driver-core/src/test/resources/auth/legacy/connection-string.json index 072dd176dc8..f8b0f9426c1 100644 --- a/driver-core/src/test/resources/auth/legacy/connection-string.json +++ b/driver-core/src/test/resources/auth/legacy/connection-string.json @@ -565,7 +565,7 @@ }, { "description": "should handle a complicated url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", - "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abc%2Cd%25ef%3Ag%26hi", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abcd%25ef%3Ag%26hi", "valid": true, "credential": { "username": "user", @@ -574,7 +574,7 @@ "mechanism": "MONGODB-OIDC", "mechanism_properties": { "ENVIRONMENT": "azure", - "TOKEN_RESOURCE": "abc,d%ef:g&hi" + "TOKEN_RESOURCE": "abcd%ef:g&hi" } } }, diff --git a/driver-core/src/test/resources/connection-string/valid-options.json b/driver-core/src/test/resources/connection-string/valid-options.json index 4c2bded9e72..cb2027f86ff 100644 --- a/driver-core/src/test/resources/connection-string/valid-options.json +++ b/driver-core/src/test/resources/connection-string/valid-options.json @@ -20,6 +20,25 @@ "options": { "authmechanism": "MONGODB-CR" } + }, + { + "description": "Colon in a key value pair", + "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "authmechanismProperties": { + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } } ] } diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java index 6a8d9ff4fc3..bc905c9c6d8 100644 --- a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java @@ -39,20 +39,6 @@ void defaults() { assertAll(() -> assertNull(connectionStringDefault.getServerMonitoringMode())); } - @Test - public void mustDecodeOidcIndividually() { - String string = "abc,d!@#$%^&*;ef:ghi"; - // encoded tags will fail parsing with an "invalid read preference tag" - // error if decoding is skipped. 
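With the per-value decoding removed, an authMechanismProperties value containing a comma can no longer be expressed in the connection string; as the MongoCredential javadoc added above notes, such a TOKEN_RESOURCE has to be supplied through the MongoClient configuration. A minimal sketch, assuming placeholder values for the environment, resource and host:

```java
import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.MongoCredential;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;

public final class OidcTokenResourceExample {
    public static void main(final String[] args) {
        // A TOKEN_RESOURCE containing a comma is set programmatically on the credential,
        // not in the URI.
        MongoCredential credential = MongoCredential.createOidcCredential(null)
                .withMechanismProperty("ENVIRONMENT", "azure")                      // placeholder environment
                .withMechanismProperty("TOKEN_RESOURCE", "api://resource,with,commas"); // placeholder resource
        MongoClientSettings settings = MongoClientSettings.builder()
                .applyConnectionString(new ConnectionString("mongodb://localhost"))
                .credential(credential)
                .build();
        try (MongoClient client = MongoClients.create(settings)) {
            client.getDatabase("test").listCollectionNames().first();
        }
    }
}
```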
- String encodedTags = encode("dc:ny,rack:1"); - ConnectionString cs = new ConnectionString( - "mongodb://localhost/?readPreference=primaryPreferred&readPreferenceTags=" + encodedTags - + "&authMechanism=MONGODB-OIDC&authMechanismProperties=" - + "ENVIRONMENT:azure,TOKEN_RESOURCE:" + encode(string)); - MongoCredential credential = Assertions.assertNotNull(cs.getCredential()); - assertEquals(string, credential.getMechanismProperty("TOKEN_RESOURCE", null)); - } - @Test public void mustDecodeNonOidcAsWhole() { // this string allows us to check if there is no double decoding diff --git a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java index 70ab06a08b1..2d82ecf3d92 100644 --- a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java +++ b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java @@ -236,7 +236,7 @@ public void test2p4InvalidClientConfigurationWithCallback() { public void test2p5InvalidAllowedHosts() { assumeTestEnvironment(); - String uri = "mongodb://localhost/?authMechanism=MONGODB-OIDC&&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:123"; + String uri = "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:123"; ConnectionString cs = new ConnectionString(uri); MongoCredential credential = assertNotNull(cs.getCredential()) .withMechanismProperty("ALLOWED_HOSTS", Collections.emptyList()); From 3c8b44e710c9d83056abf9a7d51b87fa479d395e Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Tue, 11 Jun 2024 19:32:03 -0400 Subject: [PATCH 25/90] Test updates (#1419) JAVA-5491 --- .../command-logging/unacknowledged-write.json | 1 + .../command-monitoring/unacknowledgedBulkWrite.json | 1 + 2 files changed, 2 insertions(+) diff --git a/driver-core/src/test/resources/unified-test-format/command-logging/unacknowledged-write.json b/driver-core/src/test/resources/unified-test-format/command-logging/unacknowledged-write.json index dad0c0a36a0..0d33c020d54 100644 --- a/driver-core/src/test/resources/unified-test-format/command-logging/unacknowledged-write.json +++ b/driver-core/src/test/resources/unified-test-format/command-logging/unacknowledged-write.json @@ -5,6 +5,7 @@ { "client": { "id": "client", + "useMultipleMongoses": false, "observeLogMessages": { "command": "debug" } diff --git a/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledgedBulkWrite.json b/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledgedBulkWrite.json index 782cb84a5bf..ed6ceafa5fd 100644 --- a/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledgedBulkWrite.json +++ b/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledgedBulkWrite.json @@ -5,6 +5,7 @@ { "client": { "id": "client", + "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent", "commandSucceededEvent", From 24b3aff81036e0e76e804c4da1610fd67adcd87a Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Thu, 13 Jun 2024 08:27:31 -0600 Subject: [PATCH 26/90] Add kmip tests, use mongoCrypt snapshot (#1406) JAVA-5300 --- build.gradle | 2 +- .../client/model/vault/DataKeyOptions.java | 3 + .../legacy/azureKMS.json | 11 + .../client-side-encryption/legacy/gcpKMS.json | 11 + .../legacy/kmipKMS.json | 139 ++++++ 
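The DataKeyOptions javadoc below documents a new delegated flag on the KMIP master key, and the mongoCrypt dependency bump in this patch is what supplies the underlying support. A hedged sketch of creating such a key; the endpoint, key vault namespace and client URI are placeholders, and real use also needs TLS options for the KMIP connection:

```java
import com.mongodb.ClientEncryptionSettings;
import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.client.model.vault.DataKeyOptions;
import com.mongodb.client.vault.ClientEncryption;
import com.mongodb.client.vault.ClientEncryptions;
import org.bson.BsonBinary;
import org.bson.BsonBoolean;
import org.bson.BsonDocument;

import java.util.HashMap;
import java.util.Map;

public final class DelegatedKmipDataKey {
    public static void main(final String[] args) {
        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
        Map<String, Object> kmip = new HashMap<>();
        kmip.put("endpoint", "localhost:5698"); // placeholder KMIP endpoint
        kmsProviders.put("kmip", kmip);

        ClientEncryptionSettings settings = ClientEncryptionSettings.builder()
                .keyVaultMongoClientSettings(MongoClientSettings.builder()
                        .applyConnectionString(new ConnectionString("mongodb://localhost"))
                        .build())
                .keyVaultNamespace("keyvault.datakeys")
                .kmsProviders(kmsProviders)
                .build();

        try (ClientEncryption clientEncryption = ClientEncryptions.create(settings)) {
            // delegated: true asks the KMIP server itself to perform the key wrapping.
            BsonBinary dataKeyId = clientEncryption.createDataKey("kmip",
                    new DataKeyOptions().masterKey(new BsonDocument("delegated", BsonBoolean.TRUE)));
            System.out.println(dataKeyId);
        }
    }
}
```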
.../client-side-encryption/createDataKey.json | 64 +++ .../rewrapManyDataKey.json | 453 +++++++++++++++++- 7 files changed, 670 insertions(+), 13 deletions(-) diff --git a/build.gradle b/build.gradle index 2ba15e3daf8..59745250539 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ ext { zstdVersion = '1.5.5-3' awsSdkV2Version = '2.18.9' awsSdkV1Version = '1.12.337' - mongoCryptVersion = '1.8.0' + mongoCryptVersion = '1.10.0-SNAPSHOT' projectReactorVersion = '2022.0.0' junitBomVersion = '5.8.2' logbackVersion = '1.3.14' diff --git a/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java index 90893f87ef1..e9b60dc3771 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java @@ -128,6 +128,9 @@ public List getKeyAltNames() { * omitted, the driver creates a random 96 byte KMIP Secret Data managed object.

  • *
  • endpoint: a String, the endpoint as a host with required port. e.g. "example.com:443". If endpoint is not provided, it * defaults to the required endpoint from the KMS providers map.
  • + *
  • delegated: If true (recommended), the KMIP server will perform + * encryption and decryption. If delegated is not provided, defaults + * to false.
  • *
*

* If the kmsProvider is "local" the masterKey is not applicable. diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/azureKMS.json b/driver-core/src/test/resources/client-side-encryption/legacy/azureKMS.json index afecf40b0a7..b0f5111370b 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/azureKMS.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/azureKMS.json @@ -78,6 +78,17 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/gcpKMS.json b/driver-core/src/test/resources/client-side-encryption/legacy/gcpKMS.json index c2c08b8a232..65f12ec1395 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/gcpKMS.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/gcpKMS.json @@ -78,6 +78,17 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/kmipKMS.json b/driver-core/src/test/resources/client-side-encryption/legacy/kmipKMS.json index 5749d21ab81..349328b4333 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/kmipKMS.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/kmipKMS.json @@ -78,6 +78,17 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" @@ -117,6 +128,38 @@ "altname", "kmip_altname" ] + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + }, + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": "11" + }, + "keyAltNames": [ + "delegated" + ] } ], "tests": [ @@ -218,6 +261,102 @@ ] } } + }, + { + "description": "Insert a document with auto encryption using KMIP delegated KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip_delegated": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + 
"_id": { + "$in": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ] + } + } } ] } diff --git a/driver-core/src/test/resources/unified-test-format/client-side-encryption/createDataKey.json b/driver-core/src/test/resources/unified-test-format/client-side-encryption/createDataKey.json index 110c726f9a2..f99fa3dbcf3 100644 --- a/driver-core/src/test/resources/unified-test-format/client-side-encryption/createDataKey.json +++ b/driver-core/src/test/resources/unified-test-format/client-side-encryption/createDataKey.json @@ -337,6 +337,70 @@ } ] }, + { + "description": "create datakey with KMIP delegated KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip", + "opts": { + "masterKey": { + "delegated": true + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + }, + "delegated": true + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, { "description": "create datakey with local KMS provider", "operations": [ diff --git a/driver-core/src/test/resources/unified-test-format/client-side-encryption/rewrapManyDataKey.json b/driver-core/src/test/resources/unified-test-format/client-side-encryption/rewrapManyDataKey.json index 6b3c9664a97..8803491dbe9 100644 --- a/driver-core/src/test/resources/unified-test-format/client-side-encryption/rewrapManyDataKey.json +++ b/driver-core/src/test/resources/unified-test-format/client-side-encryption/rewrapManyDataKey.json @@ -246,6 +246,36 @@ "masterKey": { "provider": "local" } + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba5" + }, + "keyAltNames": [ + "kmip_delegated_key" + ], + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip", + "keyId": "11", + "delegated": true + } } ] } @@ -317,8 +347,8 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, 
+ "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, "upsertedIds": {}, @@ -440,6 +470,34 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -502,8 +560,8 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, "upsertedIds": {}, @@ -625,6 +683,34 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -689,8 +775,8 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, "upsertedIds": {}, @@ -818,6 +904,36 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -878,8 +994,8 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, "upsertedIds": {}, @@ -1004,6 +1120,35 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -1044,6 +1189,228 @@ } ] }, + { + "description": "rewrap with new KMIP delegated KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_delegated_key" + } + }, + "opts": { + "provider": "kmip", + "masterKey": { + "delegated": true + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_delegated_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } 
+ }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, { "description": "rewrap with new local KMS provider", "operations": [ @@ -1063,8 +1430,8 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, "upsertedIds": {}, @@ -1180,6 +1547,32 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -1229,8 +1622,8 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 5, - "modifiedCount": 5, + "matchedCount": 6, + "modifiedCount": 6, "deletedCount": 0, "upsertedCount": 0, "upsertedIds": {}, @@ -1294,6 +1687,16 @@ "keyName": "key-name-csfle" } }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba5" + }, + "masterKey": { + "provider": "kmip", + "keyId": "11", + "delegated": true + } + }, { "_id": { "$binary": { @@ -1447,6 +1850,32 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + 
}, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { From 1ce52acb0cac5a5e146701f123116244065bd593 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Thu, 13 Jun 2024 14:38:53 -0400 Subject: [PATCH 27/90] Forward slash in connection string is optional (#1420) JAVA-5166 --- .../main/com/mongodb/ConnectionString.java | 18 ++++---- .../connection-string/invalid-uris.json | 27 +++++++---- .../connection-string/valid-auth.json | 11 ++--- .../connection-string/valid-options.json | 17 +++++++ .../valid-unix_socket-absolute.json | 15 +++++++ .../valid-unix_socket-relative.json | 15 +++++++ .../connection-string/valid-warnings.json | 45 +++++++++++++++++++ .../ConnectionStringSpecification.groovy | 23 +++++++++- .../com/mongodb/ConnectionStringTest.java | 6 +++ .../MongoClientURISpecification.groovy | 8 ---- 10 files changed, 154 insertions(+), 31 deletions(-) diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java index c4e50d88020..82c6b3a00ec 100644 --- a/driver-core/src/main/com/mongodb/ConnectionString.java +++ b/driver-core/src/main/com/mongodb/ConnectionString.java @@ -368,16 +368,18 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient // Split out the user and host information String userAndHostInformation; - int idx = unprocessedConnectionString.indexOf("/"); - if (idx == -1) { - if (unprocessedConnectionString.contains("?")) { - throw new IllegalArgumentException("The connection string contains options without trailing slash"); - } + int firstForwardSlashIdx = unprocessedConnectionString.indexOf("/"); + int firstQuestionMarkIdx = unprocessedConnectionString.indexOf("?"); + if (firstQuestionMarkIdx == -1 && firstForwardSlashIdx == -1) { userAndHostInformation = unprocessedConnectionString; unprocessedConnectionString = ""; + } else if (firstQuestionMarkIdx != -1 && (firstForwardSlashIdx == -1 || firstQuestionMarkIdx < firstForwardSlashIdx)) { + // there is a question mark, and there is no slash or the question mark comes before any slash + userAndHostInformation = unprocessedConnectionString.substring(0, firstQuestionMarkIdx); + unprocessedConnectionString = unprocessedConnectionString.substring(firstQuestionMarkIdx); } else { - userAndHostInformation = unprocessedConnectionString.substring(0, idx); - unprocessedConnectionString = unprocessedConnectionString.substring(idx + 1); + userAndHostInformation = unprocessedConnectionString.substring(0, firstForwardSlashIdx); + unprocessedConnectionString = unprocessedConnectionString.substring(firstForwardSlashIdx + 1); } // Split the user and host information @@ -385,7 +387,7 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient String hostIdentifier; String userName = null; char[] password = null; - idx = userAndHostInformation.lastIndexOf("@"); + int idx = userAndHostInformation.lastIndexOf("@"); if (idx > 0) { userInfo = userAndHostInformation.substring(0, idx).replace("+", "%2B"); hostIdentifier = userAndHostInformation.substring(idx + 1); diff --git a/driver-core/src/test/resources/connection-string/invalid-uris.json b/driver-core/src/test/resources/connection-string/invalid-uris.json index 2a182fac7e2..a7accbd27d6 100644 --- a/driver-core/src/test/resources/connection-string/invalid-uris.json +++ b/driver-core/src/test/resources/connection-string/invalid-uris.json @@ -162,15 +162,6 @@ "auth": null, "options": null }, - { - "description": "Missing delimiting slash between hosts and options", - 
"uri": "mongodb://example.com?w=1", - "valid": false, - "warning": null, - "hosts": null, - "auth": null, - "options": null - }, { "description": "Incomplete key value pair for option", "uri": "mongodb://example.com/?w", @@ -269,6 +260,24 @@ "hosts": null, "auth": null, "options": null + }, + { + "description": "Username with password containing an unescaped percent sign and an escaped one", + "uri": "mongodb://user%20%:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign (non hex digit)", + "uri": "mongodb://user%w:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null } ] } diff --git a/driver-core/src/test/resources/connection-string/valid-auth.json b/driver-core/src/test/resources/connection-string/valid-auth.json index d3cafb029b9..176a54a096a 100644 --- a/driver-core/src/test/resources/connection-string/valid-auth.json +++ b/driver-core/src/test/resources/connection-string/valid-auth.json @@ -242,7 +242,7 @@ }, { "description": "Subdelimiters in user/pass don't need escaping (MONGODB-CR)", - "uri": "mongodb://!$&'()*,;=:!$&'()*,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", "valid": true, "warning": false, "hosts": [ @@ -253,8 +253,8 @@ } ], "auth": { - "username": "!$&'()*,;=", - "password": "!$&'()*,;=", + "username": "!$&'()*+,;=", + "password": "!$&'()*+,;=", "db": "admin" }, "options": { @@ -284,7 +284,7 @@ }, { "description": "Escaped username (GSSAPI)", - "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authMechanism=GSSAPI", + "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authMechanism=GSSAPI", "valid": true, "warning": false, "hosts": [ @@ -303,7 +303,8 @@ "authmechanism": "GSSAPI", "authmechanismproperties": { "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" } } }, diff --git a/driver-core/src/test/resources/connection-string/valid-options.json b/driver-core/src/test/resources/connection-string/valid-options.json index cb2027f86ff..3c79fe7ae55 100644 --- a/driver-core/src/test/resources/connection-string/valid-options.json +++ b/driver-core/src/test/resources/connection-string/valid-options.json @@ -21,6 +21,23 @@ "authmechanism": "MONGODB-CR" } }, + { + "description": "Missing delimiting slash between hosts and options", + "uri": "mongodb://example.com?tls=true", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "tls": true + } + }, { "description": "Colon in a key value pair", "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster", diff --git a/driver-core/src/test/resources/connection-string/valid-unix_socket-absolute.json b/driver-core/src/test/resources/connection-string/valid-unix_socket-absolute.json index 5bb02476eb7..66491db13ba 100644 --- a/driver-core/src/test/resources/connection-string/valid-unix_socket-absolute.json +++ b/driver-core/src/test/resources/connection-string/valid-unix_socket-absolute.json @@ -30,6 +30,21 @@ "auth": null, "options": null }, 
+ { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://%2Ftmp%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, { "description": "Unix domain socket (absolute path with spaces in path)", "uri": "mongodb://%2Ftmp%2F %2Fmongodb-27017.sock", diff --git a/driver-core/src/test/resources/connection-string/valid-unix_socket-relative.json b/driver-core/src/test/resources/connection-string/valid-unix_socket-relative.json index 2ce649ffc23..788720920ba 100644 --- a/driver-core/src/test/resources/connection-string/valid-unix_socket-relative.json +++ b/driver-core/src/test/resources/connection-string/valid-unix_socket-relative.json @@ -30,6 +30,21 @@ "auth": null, "options": null }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://rel%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, { "description": "Unix domain socket (relative path with spaces)", "uri": "mongodb://rel%2F %2Fmongodb-27017.sock", diff --git a/driver-core/src/test/resources/connection-string/valid-warnings.json b/driver-core/src/test/resources/connection-string/valid-warnings.json index 87f7248f21e..f0e8288bc73 100644 --- a/driver-core/src/test/resources/connection-string/valid-warnings.json +++ b/driver-core/src/test/resources/connection-string/valid-warnings.json @@ -63,6 +63,51 @@ "options": { "wtimeoutms": 10 } + }, + { + "description": "Empty integer option values are ignored", + "uri": "mongodb://localhost/?maxIdleTimeMS=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Empty boolean option value are ignored", + "uri": "mongodb://localhost/?journal=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Comma in a key value pair causes a warning", + "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null } ] } diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy index d56aa8a9c7c..72fdf108698 100644 --- a/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy @@ -141,6 +141,28 @@ class ConnectionStringSpecification extends Specification { .withWTimeout(5, MILLISECONDS).withJournal(true) } + @Unroll + def 'should treat trailing slash before query parameters as optional'() { + expect: + uri.getApplicationName() == appName + uri.getDatabase() == db + + where: + uri | appName | db + new ConnectionString('mongodb://mongodb.com') | null | null + new ConnectionString('mongodb://mongodb.com?') | null | null + new ConnectionString('mongodb://mongodb.com/') | null | null + new ConnectionString('mongodb://mongodb.com/?') | null | null + new ConnectionString('mongodb://mongodb.com/test') | null | "test" + new 
ConnectionString('mongodb://mongodb.com/test?') | null | "test" + new ConnectionString('mongodb://mongodb.com/?appName=a1') | "a1" | null + new ConnectionString('mongodb://mongodb.com?appName=a1') | "a1" | null + new ConnectionString('mongodb://mongodb.com/?appName=a1/a2') | "a1/a2" | null + new ConnectionString('mongodb://mongodb.com?appName=a1/a2') | "a1/a2" | null + new ConnectionString('mongodb://mongodb.com/test?appName=a1') | "a1" | "test" + new ConnectionString('mongodb://mongodb.com/test?appName=a1/a2') | "a1/a2" | "test" + } + def 'should correctly parse different UUID representations'() { expect: uri.getUuidRepresentation() == uuidRepresentation @@ -473,7 +495,6 @@ class ConnectionStringSpecification extends Specification { 'has an empty host' | 'mongodb://localhost:27017,,localhost:27019' 'has an malformed IPv6 host' | 'mongodb://[::1' 'has unescaped colons' | 'mongodb://locahost::1' - 'is missing a slash' | 'mongodb://localhost?wTimeout=5' 'contains an invalid port string' | 'mongodb://localhost:twenty' 'contains an invalid port negative' | 'mongodb://localhost:-1' 'contains an invalid port out of range' | 'mongodb://localhost:1000000' diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java b/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java index 3b28460e866..80cc9f65e83 100644 --- a/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java @@ -29,6 +29,8 @@ import java.util.Collection; import java.util.List; +import static org.junit.Assume.assumeFalse; + // See https://github.com/mongodb/specifications/tree/master/source/connection-string/tests public class ConnectionStringTest extends AbstractConnectionStringTest { public ConnectionStringTest(final String filename, final String description, final String input, final BsonDocument definition) { @@ -37,6 +39,10 @@ public ConnectionStringTest(final String filename, final String description, fin @Test public void shouldPassAllOutcomes() { + // Java driver currently throws an IllegalArgumentException for these tests + assumeFalse(getDescription().equals("Empty integer option values are ignored")); + assumeFalse(getDescription().equals("Comma in a key value pair causes a warning")); + if (getFilename().equals("invalid-uris.json")) { testInvalidUris(); } else if (getFilename().equals("valid-auth.json")) { diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy index 3db0e1a45d8..b187df8dab8 100644 --- a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy @@ -34,14 +34,6 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS class MongoClientURISpecification extends Specification { - def 'should throw Exception if URI does not have a trailing slash'() { - when: - new MongoClientURI('mongodb://localhost?wtimeoutMS=5') - - then: - thrown(IllegalArgumentException) - } - def 'should not throw an Exception if URI contains an unknown option'() { when: new MongoClientURI('mongodb://localhost/?unknownOption=5') From b8170a300a7f400c6f526f238131d4714bd14f78 Mon Sep 17 00:00:00 2001 From: Andreas Braun Date: Fri, 14 Jun 2024 15:54:28 +0200 Subject: [PATCH 28/90] Add GitHub Actions based release automation (#1400) JAVA-5479 --- .github/workflows/bump-and-tag.sh | 18 ++++++ .github/workflows/release.yml | 98 
+++++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+) create mode 100755 .github/workflows/bump-and-tag.sh create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/bump-and-tag.sh b/.github/workflows/bump-and-tag.sh new file mode 100755 index 00000000000..84b7567427c --- /dev/null +++ b/.github/workflows/bump-and-tag.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -e + +# This script assumes that release X.Y.Z will always be created from X.Y.Z-SNAPSHOT" +echo "Replace snapshot version with release version ${RELEASE_VERSION} in build.gradle" +sed --in-place "s/version = '.*-SNAPSHOT'/version = '${RELEASE_VERSION}'/g" build.gradle + +echo "Create package commit for ${RELEASE_VERSION}" +git commit -m "Version: bump ${RELEASE_VERSION}" build.gradle + +echo "Create release tag for ${RELEASE_VERSION}" +git tag -a -m "${RELEASE_VERSION}" r${RELEASE_VERSION} + +echo "Bump to snapshot version for ${NEXT_VERSION}" +sed --in-place "s/version = '${RELEASE_VERSION}'/version = '${NEXT_VERSION}-SNAPSHOT'/g" build.gradle + +echo "Create commit for version bump to ${NEXT_VERSION}" +git commit -m "Version: bump ${NEXT_VERSION}-SNAPSHOT" build.gradle diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..cfb5002dc56 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,98 @@ +name: "Release New Version" +run-name: "Release ${{ inputs.version }}" + +on: + workflow_dispatch: + inputs: + version: + description: "The version to be released (e.g. 1.2.3)" + required: true + type: "string" + +jobs: + prepare-release: + environment: release + name: "Prepare release" + runs-on: ubuntu-latest + permissions: + # Write permission for id-token is necessary to generate a new token for the GitHub App + id-token: write + # Write permission for contents is to ensure we're allowed to push to the repository + contents: write + + steps: + - name: "Create release output" + run: echo '🎬 Release process for version ${{ env.RELEASE_VERSION }} started by @${{ github.triggering_actor }}' >> $GITHUB_STEP_SUMMARY + + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + with: + app-id: ${{ vars.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: "Store version numbers in env variables" + # The awk command to increase the version number was copied from + # StackOverflow: https://stackoverflow.com/a/61921674/3959933 + run: | + echo RELEASE_VERSION=${{ inputs.version }} >> $GITHUB_ENV + echo NEXT_VERSION=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF += 1 ; print}') >> $GITHUB_ENV + echo RELEASE_BRANCH=$(echo ${{ inputs.version }} | awk -F. -v OFS=. 
'{$NF = "x" ; print}') >> $GITHUB_ENV + + - name: "Ensure release tag does not already exist" + run: | + if [[ $(git tag -l r${RELEASE_VERSION}) == r${RELEASE_VERSION} ]]; then + echo '❌ Release failed: tag for version ${{ inputs.version }} already exists' >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + # For patch releases (A.B.C where C != 0), we expect the release to be + # triggered from the A.B.x maintenance branch + - name: "Fail if patch release is created from wrong release branch" + if: ${{ !endsWith(inputs.version, '.0') && env.RELEASE_BRANCH != github.ref_name }} + run: | + echo '❌ Release failed due to branch mismatch: expected ${{ inputs.version }} to be released from ${{ env.RELEASE_BRANCH }}, got ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + exit 1 + + # For non-patch releases (A.B.C where C == 0), we expect the release to + # be triggered from master or the A.B.x maintenance branch + - name: "Fail if non-patch release is created from wrong release branch" + if: ${{ endsWith(inputs.version, '.0') && env.RELEASE_BRANCH != github.ref_name && github.ref_name != 'master' }} + run: | + echo '❌ Release failed due to branch mismatch: expected ${{ inputs.version }} to be released from ${{ env.RELEASE_BRANCH }} or master, got ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + exit 1 + + # If a non-patch release is created from a branch other than its + # maintenance branch, create that branch from the current one and push it + - name: "Create new release branch for non-patch release" + if: ${{ endsWith(inputs.version, '.0') && env.RELEASE_BRANCH != github.ref_name }} + run: | + echo '🆕 Creating new release branch ${RELEASE_BRANCH} from ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + git checkout -b ${RELEASE_BRANCH} + + - name: "Set git author information" + run: | + git config user.name "${GIT_AUTHOR_NAME}" + git config user.email "${GIT_AUTHOR_EMAIL}" + + # This step bumps version numbers in build.gradle and creates git artifacts for the release + - name: "Bump version numbers and create release tag" + run: .github/workflows/bump-and-tag.sh + + - name: "Push release branch and tag" + run: | + git push origin ${RELEASE_BRANCH} + git push origin r${{ env.RELEASE_VERSION }} + + - name: "Create draft release with generated changelog" + run: | + echo "RELEASE_URL=$(\ + gh release create r${RELEASE_VERSION} \ + --target ${{ env.RELEASE_BRANCH }} \ + --title "Java Driver ${{ env.RELEASE_VERSION }} ($(date '+%B %d, %Y'))" \ + --generate-notes \ + --draft\ + )" >> "$GITHUB_ENV" + + - name: "Set summary" + run: | + echo '🚀 Created tag and drafted release for version [${{ env.RELEASE_VERSION }}](${{ env.RELEASE_URL }})' >> $GITHUB_STEP_SUMMARY + echo '✍️ You may now update the release notes and publish the release when ready' >> $GITHUB_STEP_SUMMARY From 476af2769a3fecbb7610577a9035e360efc2d5d6 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Fri, 14 Jun 2024 10:34:13 -0400 Subject: [PATCH 29/90] Update Javadoc to reflect that forward slash is optional (#1421) JAVA-5166 --- driver-core/src/main/com/mongodb/ConnectionString.java | 6 ++---- driver-legacy/src/main/com/mongodb/MongoClientURI.java | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java index 82c6b3a00ec..17a990ea127 100644 --- a/driver-core/src/main/com/mongodb/ConnectionString.java +++ b/driver-core/src/main/com/mongodb/ConnectionString.java @@ -78,8 +78,7 @@ *

 * <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
 * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
 * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
- * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
- * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
+ * <li>{@code ?options} are connection options. Options are name=value pairs and the pairs
 * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&",
 * but should be considered as deprecated.</li>
 *
@@ -98,8 +97,7 @@
 * seed list used to connect, as if each one were provided as host/port pair in a URI using the normal mongodb protocol.
 * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
 * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
- * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
- * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
+ * <li>{@code ?options} are connection options. Options are name=value pairs and the pairs
 * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&",
 * but should be considered as deprecated. Additionally with the mongodb+srv protocol, TXT records are looked up from a Domain Name
 * Server for the given host, and the text value of each one is prepended to any options on the URI itself. Because the last specified
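A minimal sketch of the behaviour the updated Javadoc describes, written against the driver's public ConnectionString class; the example class name, URIs, and printed values are illustrative and not taken from the driver's test suite:

    import com.mongodb.ConnectionString;

    public final class OptionalSlashExample {
        public static void main(final String[] args) {
            // Query options placed directly after the host, with no trailing slash, are now accepted.
            ConnectionString noSlash = new ConnectionString("mongodb://example.com?appName=app1");
            // The form with the trailing slash keeps working and parses to the same values.
            ConnectionString withSlash = new ConnectionString("mongodb://example.com/?appName=app1");

            System.out.println(noSlash.getApplicationName());    // app1
            System.out.println(withSlash.getApplicationName());  // app1
            System.out.println(noSlash.getDatabase());           // null, because no database segment was given
        }
    }

The Groovy specification added in PATCH 27 exercises the same pairs of URIs with and without the trailing slash.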
diff --git a/driver-legacy/src/main/com/mongodb/MongoClientURI.java b/driver-legacy/src/main/com/mongodb/MongoClientURI.java
index 181a474d49a..43cdccc4f4b 100644
--- a/driver-legacy/src/main/com/mongodb/MongoClientURI.java
+++ b/driver-legacy/src/main/com/mongodb/MongoClientURI.java
@@ -42,8 +42,7 @@
 * <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
 * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
 * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
- * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
- * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
+ * <li>{@code ?options} are connection options. Options are name=value pairs and the pairs
 * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&",
 * but should be considered as deprecated.</li>
 *
@@ -62,8 +61,7 @@
 * seed list used to connect, as if each one were provided as host/port pair in a URI using the normal mongodb protocol.
 * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
 * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
- * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
- * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
  • {@code ?options} are connection options. Options are name=value pairs and the pairs * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&", * but should be considered as deprecated. Additionally with the mongodb+srv protocol, TXT records are looked up from a Domain Name * Server for the given host, and the text value of each one is prepended to any options on the URI itself. Because the last specified From 98934ddc52efa48e7e3b41339e57ff4c8dc89322 Mon Sep 17 00:00:00 2001 From: Andreas Braun Date: Fri, 14 Jun 2024 16:58:57 +0200 Subject: [PATCH 30/90] Fix release workflow (#1422) * Fix input names * Set git author information JAVA-5479 --- .github/workflows/release.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cfb5002dc56..0832b079955 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,6 +9,10 @@ on: required: true type: "string" +env: + GIT_AUTHOR_EMAIL: "167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com" + GIT_AUTHOR_NAME: "mongodb-dbx-release-bot[bot]" + jobs: prepare-release: environment: release @@ -26,8 +30,8 @@ jobs: - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 with: - app-id: ${{ vars.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} - name: "Store version numbers in env variables" # The awk command to increase the version number was copied from From e4b8a83542a03709acfc8d27b561685f936f98cb Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Thu, 27 Jun 2024 17:46:43 -0700 Subject: [PATCH 31/90] Extend the regex pattern to allow top-level domains (TLDs) up to 63 characters to comply with RFC 1034 standard. 
(#1427) JAVA-5490 --- .../mongodb/internal/connection/DomainNameUtils.java | 2 +- .../internal/connection/DomainNameUtilsTest.java | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java b/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java index a1f0938e104..b64ea64ae24 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java @@ -22,7 +22,7 @@ */ public class DomainNameUtils { private static final Pattern DOMAIN_PATTERN = - Pattern.compile("^(?=.{1,255}$)((([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6}|localhost))$"); + Pattern.compile("^(?=.{1,255}$)((([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,63}|localhost))$"); static boolean isDomainName(final String domainName) { return DOMAIN_PATTERN.matcher(domainName).matches(); diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java index cc987cacf62..43abfa5b9f7 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java @@ -32,23 +32,25 @@ class DomainNameUtilsTest { "123numbers.com", "mixed-123domain.net", "longdomainnameabcdefghijk.com", + "i-0123456789abcdef.ec2.internal", + "ip-10-24-34-0.ec2.internal", "xn--frosch-6ya.com", "xn--emoji-grinning-3s0b.org", "xn--bcher-kva.ch", "localhost", - "abcdefghijklmnopqrstuvwxyz0123456789-abcdefghijklmnopqrstuvwxyz.com", + "abcdefghijklmnopqrstuvwxyz0123456789-abcdefghijklmnopqrstuvwxyz.com", //63 characters label name. + "a.abcdefghijklmnopqrstuvwxyzabcdefghjklabcdefghijklmnopqrstuvwxyz", //63 characters TLD. "xn--weihnachten-uzb.org", "sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." + "com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." + "com.domain.com.sub.domain.subb.com" //255 characters }) void shouldReturnTrueWithValidHostName(final String hostname) { - Assertions.assertTrue(isDomainName(hostname)); + Assertions.assertTrue(isDomainName(hostname), hostname + " is not a valid domain name"); } @ParameterizedTest @ValueSource(strings = { - "xn--tst-0qa.example", "xn--frosch-6ya.w23", "-special_chars_$$.net", "special_chars_$$.net", @@ -60,7 +62,8 @@ void shouldReturnTrueWithValidHostName(final String hostname) { "notlocalhost", "домен.com", //NON-ASCII "ẞẞ.com", //NON-ASCII - "abcdefghijklmnopqrstuvwxyz0123456789-abcdefghijklmnopqrstuvwxyzl.com", + "abcdefghijklmnopqrstuvwxyz0123456789-abcdefghijklmnopqrstuvwxyzl.com", //64 characters label name. + "a.abcdefghijklmnopqrstuvwxyzabcdefghjklabcdefghijklmnopqrstuvwxyza", //64 characters TLD. "this-domain-is-really-long-because-it-just-keeps-going-and-going-and-its-still-not-done-yet-because-theres-more.net", "verylongsubdomainnamethatisreallylongandmaycausetroubleforparsing.example", "sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." 
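The widened TLD range can be sketched with a standalone regex check. The pattern is copied from the updated DomainNameUtils above; the wrapper class and the generated host names are illustrative only, since DomainNameUtils itself is internal driver code:

    import java.util.regex.Pattern;

    public final class TldLengthSketch {
        // Same pattern the patch installs in DomainNameUtils: TLD labels of 2 to 63 letters are accepted (RFC 1034).
        private static final Pattern DOMAIN_PATTERN =
                Pattern.compile("^(?=.{1,255}$)((([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,63}|localhost))$");

        public static void main(final String[] args) {
            System.out.println(DOMAIN_PATTERN.matcher("host." + letters(63)).matches()); // true: 63-character TLD
            System.out.println(DOMAIN_PATTERN.matcher("host." + letters(64)).matches()); // false: one character too many
        }

        private static String letters(final int count) {
            StringBuilder sb = new StringBuilder(count);
            for (int i = 0; i < count; i++) {
                sb.append('a');
            }
            return sb.toString();
        }
    }

The parameterized tests above make the same assertion with literal 63- and 64-character TLD samples.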
From e283f57f569e2599c7434a5c10716cccd80d44ce Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Fri, 28 Jun 2024 09:26:22 -0600 Subject: [PATCH 32/90] Include links to the Evergreen build and to the driver security testing summary in the SSDLC report (#1426) JAVA-5500 --- .evergreen/.evg.yml | 2 + .evergreen/ssdlc-report.sh | 26 +++++++++--- .../template_ssdlc_compliance_report.md | 41 +++++++++---------- 3 files changed, 42 insertions(+), 27 deletions(-) diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index c0bceb90c70..67b27964c41 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -150,6 +150,8 @@ functions: env: PRODUCT_NAME: ${product_name} PRODUCT_VERSION: ${product_version} + PRODUCT_RELEASE_CREATOR: ${author} + EVERGREEN_VERSION_ID: ${version_id} script: .evergreen/ssdlc-report.sh - command: ec2.assume_role params: diff --git a/.evergreen/ssdlc-report.sh b/.evergreen/ssdlc-report.sh index b05e510c66b..574cce48b74 100755 --- a/.evergreen/ssdlc-report.sh +++ b/.evergreen/ssdlc-report.sh @@ -5,13 +5,23 @@ set -eu # Supported/used environment variables: # PRODUCT_NAME # PRODUCT_VERSION +# PRODUCT_RELEASE_CREATOR +# EVERGREEN_VERSION_ID if [ -z "${PRODUCT_NAME}" ]; then - echo "PRODUCT_NAME must be set to a non-empty string" + printf "\nPRODUCT_NAME must be set to a non-empty string\n" exit 1 fi if [ -z "${PRODUCT_VERSION}" ]; then - echo "PRODUCT_VERSION must be set to a non-empty string" + printf "\nPRODUCT_VERSION must be set to a non-empty string\n" + exit 1 +fi +if [ -z "${PRODUCT_RELEASE_CREATOR}" ]; then + printf "\PRODUCT_RELEASE_CREATOR must be set to a non-empty string\n" + exit 1 +fi +if [ -z "${EVERGREEN_VERSION_ID}" ]; then + printf "\EVERGREEN_VERSION_ID must be set to a non-empty string\n" exit 1 fi @@ -22,7 +32,11 @@ RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" source "${RELATIVE_DIR_PATH}/javaConfig.bash" printf "\nCreating SSDLC reports\n" - +printf "\nProduct name: %s\n" "${PRODUCT_NAME}" +printf "\nProduct version: %s\n" "${PRODUCT_VERSION}" +printf "\nProduct release creator: %s\n" "${PRODUCT_RELEASE_CREATOR}" +declare -r EVERGREEN_BUILD_URL="https://spruce.mongodb.com/version/${EVERGREEN_VERSION_ID}" +printf "\nEvergreen build URL: %s\n" "${EVERGREEN_BUILD_URL}" declare -r SSDLC_PATH="${RELATIVE_DIR_PATH}/../build/ssdlc" declare -r SSDLC_STATIC_ANALYSIS_REPORTS_PATH="${SSDLC_PATH}/static-analysis-reports" mkdir "${SSDLC_PATH}" @@ -52,14 +66,16 @@ declare -r SSDLC_REPORT_PATH="${SSDLC_PATH}/ssdlc_compliance_report.md" cp "${TEMPLATE_SSDLC_REPORT_PATH}" "${SSDLC_REPORT_PATH}" declare -a SED_EDIT_IN_PLACE_OPTION if [[ "$OSTYPE" == "darwin"* ]]; then - SED_EDIT_IN_PLACE_OPTION=(-i '') + SED_EDIT_IN_PLACE_OPTION=(-i '') else - SED_EDIT_IN_PLACE_OPTION=(-i) + SED_EDIT_IN_PLACE_OPTION=(-i) fi sed "${SED_EDIT_IN_PLACE_OPTION[@]}" \ -e "s/\${product_name}/${PRODUCT_NAME}/g" \ -e "s/\${product_version}/${PRODUCT_VERSION}/g" \ -e "s/\${report_date_utc}/$(date -u +%Y-%m-%d)/g" \ + -e "s/\${product_release_creator}/${PRODUCT_RELEASE_CREATOR}/g" \ + -e "s>\${evergreen_build_url}>${EVERGREEN_BUILD_URL}>g" \ "${SSDLC_REPORT_PATH}" printf "%s\n" "${SSDLC_REPORT_PATH}" diff --git a/.evergreen/template_ssdlc_compliance_report.md b/.evergreen/template_ssdlc_compliance_report.md index 998092b65c9..adadc60fd71 100644 --- a/.evergreen/template_ssdlc_compliance_report.md +++ b/.evergreen/template_ssdlc_compliance_report.md @@ -13,30 +13,18 @@ This report is available at ${product_version} - Report date, UTC - ${report_date_utc} - - - -## Release creator - 
-This information is available in multiple ways: - - - - + - - + +
    EvergreenRelease creator - Go to - - https://evergreen.mongodb.com/waterfall/mongo-java-driver?bv_filter=Publish%20Release, - find the build triggered from Git tag r${product_version}, see who authored it. + ${product_release_creator} +

    + Refer to data in Papertrail for more details. + There is currently no official way to serve that data. +

    Papertrail - Refer to data in Papertrail. There is currently no official way to serve that data. - Report date, UTC${report_date_utc}
    @@ -47,7 +35,7 @@ Blocked on . The MongoDB SSDLC policy is available at . -## Third-darty dependency information +## Third-party dependency information There are no dependencies to report vulnerabilities of. Our [SBOM](https://docs.devprod.prod.corp.mongodb.com/mms/python/src/sbom/silkbomb/docs/CYCLONEDX/) lite @@ -55,7 +43,7 @@ is . All the findings in the aforementioned reports are either of the MongoDB status "False Positive" or "No Fix Needed", @@ -63,6 +51,15 @@ because code that has any other findings cannot technically get into the product may also be of interest. +## Security testing results + +The testing results are available at +<${evergreen_build_url}>. + +See the driver security testing summary + +for the description of what is tested. + ## Signature information The product artifacts are signed. From 0282a7ec8edbca65413d87b7a0f84d1583153850 Mon Sep 17 00:00:00 2001 From: Cliffred van Velzen Date: Mon, 1 Jul 2024 15:09:49 +0200 Subject: [PATCH 33/90] JAVA-5342 Fix encoding generics with nullable type parameters (#1317) --- .../org/bson/codecs/kotlinx/BsonEncoder.kt | 21 ++++++++++++++++- .../kotlinx/KotlinSerializerCodecTest.kt | 23 +++++++++++++++++++ .../codecs/kotlinx/samples/DataClasses.kt | 4 ++++ 3 files changed, 47 insertions(+), 1 deletion(-) diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt index 2e68b992700..b3ae0c8cdf4 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt @@ -139,6 +139,18 @@ internal class DefaultBsonEncoder( return true } + override fun encodeSerializableValue(serializer: SerializationStrategy, value: T) { + deferredElementName?.let { + if (value != null || configuration.explicitNulls) { + encodeName(it) + super.encodeSerializableValue(serializer, value) + } else { + deferredElementName = null + } + } + ?: super.encodeSerializableValue(serializer, value) + } + override fun encodeNullableSerializableValue(serializer: SerializationStrategy, value: T?) 
{ deferredElementName?.let { if (value != null || configuration.explicitNulls) { @@ -158,7 +170,14 @@ internal class DefaultBsonEncoder( override fun encodeDouble(value: Double) = writer.writeDouble(value) override fun encodeInt(value: Int) = writer.writeInt32(value) override fun encodeLong(value: Long) = writer.writeInt64(value) - override fun encodeNull() = writer.writeNull() + override fun encodeNull() { + deferredElementName?.let { + if (configuration.explicitNulls) { + encodeName(it) + } + } + writer.writeNull() + } override fun encodeString(value: String) { when (state) { diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt index 14fcfa8a01c..0aed60b27ba 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt @@ -33,6 +33,7 @@ import org.bson.BsonUndefined import org.bson.codecs.DecoderContext import org.bson.codecs.EncoderContext import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.kotlinx.samples.Box import org.bson.codecs.kotlinx.samples.DataClassBsonValues import org.bson.codecs.kotlinx.samples.DataClassContainsOpen import org.bson.codecs.kotlinx.samples.DataClassContainsValueClass @@ -76,6 +77,7 @@ import org.bson.codecs.kotlinx.samples.DataClassWithMutableMap import org.bson.codecs.kotlinx.samples.DataClassWithMutableSet import org.bson.codecs.kotlinx.samples.DataClassWithNestedParameterized import org.bson.codecs.kotlinx.samples.DataClassWithNestedParameterizedDataClass +import org.bson.codecs.kotlinx.samples.DataClassWithNullableGeneric import org.bson.codecs.kotlinx.samples.DataClassWithNulls import org.bson.codecs.kotlinx.samples.DataClassWithPair import org.bson.codecs.kotlinx.samples.DataClassWithParameterizedDataClass @@ -202,6 +204,27 @@ class KotlinSerializerCodecTest { assertRoundTrips(expectedNulls, dataClass, altConfiguration) } + @Test + fun testDataClassWithNullableGenericsNotNull() { + val expected = + """{ + | "box": {"boxed": "String"} + |}""" + .trimMargin() + + val dataClass = DataClassWithNullableGeneric(Box("String")) + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNullableGenericsNull() { + val expectedDefault = """{"box": {}}""" + val dataClass = DataClassWithNullableGeneric(Box(null)) + assertRoundTrips(expectedDefault, dataClass) + val expectedNull = """{"box": {"boxed": null}}""" + assertRoundTrips(expectedNull, dataClass, altConfiguration) + } + @Test fun testDataClassSelfReferential() { val expected = diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt index 0326827d4a7..a58ae541d03 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt @@ -294,3 +294,7 @@ data class DataClassWithFailingInit(val id: String) { } @Serializable data class DataClassWithSequence(val value: Sequence) + +@Serializable data class Box(val boxed: T) + +@Serializable data class DataClassWithNullableGeneric(val box: Box) From fe65ed7b6847a8560bf32f7ff087d485ff97bb5d Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Mon, 1 Jul 2024 14:14:23 +0100 Subject: [PATCH 34/90] Ported tests from bson-kotlinx to bson-kotlin (#1434) JAVA-5342 
Co-authored-by: Cliffred van Velzen --- .../bson/codecs/kotlin/DataClassCodecTest.kt | 21 +++++++++++++++++++ .../bson/codecs/kotlin/samples/DataClasses.kt | 5 +++++ 2 files changed, 26 insertions(+) diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt index c115b051529..d2dbfc580cc 100644 --- a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt @@ -23,6 +23,7 @@ import org.bson.codecs.DecoderContext import org.bson.codecs.EncoderContext import org.bson.codecs.configuration.CodecConfigurationException import org.bson.codecs.configuration.CodecRegistries.fromProviders +import org.bson.codecs.kotlin.samples.Box import org.bson.codecs.kotlin.samples.DataClassEmbedded import org.bson.codecs.kotlin.samples.DataClassListOfDataClasses import org.bson.codecs.kotlin.samples.DataClassListOfListOfDataClasses @@ -55,6 +56,7 @@ import org.bson.codecs.kotlin.samples.DataClassWithMutableMap import org.bson.codecs.kotlin.samples.DataClassWithMutableSet import org.bson.codecs.kotlin.samples.DataClassWithNestedParameterized import org.bson.codecs.kotlin.samples.DataClassWithNestedParameterizedDataClass +import org.bson.codecs.kotlin.samples.DataClassWithNullableGeneric import org.bson.codecs.kotlin.samples.DataClassWithNulls import org.bson.codecs.kotlin.samples.DataClassWithObjectIdAndBsonDocument import org.bson.codecs.kotlin.samples.DataClassWithPair @@ -131,6 +133,25 @@ class DataClassCodecTest { assertDecodesTo(withStoredNulls, dataClass) } + @Test + fun testDataClassWithNullableGenericsNotNull() { + val expected = + """{ + | "box": {"boxed": "String"} + |}""" + .trimMargin() + + val dataClass = DataClassWithNullableGeneric(Box("String")) + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNullableGenericsNull() { + val expected = """{"box": {}}""" + val dataClass = DataClassWithNullableGeneric(Box(null)) + assertRoundTrips(expected, dataClass) + } + @Test fun testDataClassSelfReferential() { val expected = diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt index 029b0814118..d4ba6a14f96 100644 --- a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt @@ -15,6 +15,7 @@ */ package org.bson.codecs.kotlin.samples +import kotlinx.serialization.Serializable import kotlin.time.Duration import org.bson.BsonDocument import org.bson.BsonMaxKey @@ -162,3 +163,7 @@ data class DataClassWithFailingInit(val id: String) { data class DataClassWithSequence(val value: Sequence) data class DataClassWithJVMErasure(val duration: Duration, val ints: List) + +data class Box(val boxed: T) + +data class DataClassWithNullableGeneric(val box: Box) From ab812986769966d44d21f4d4db3969e332b08894 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Mon, 1 Jul 2024 15:26:51 +0100 Subject: [PATCH 35/90] Fix test port import (#1435) --- .../test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt | 1 - 1 file changed, 1 deletion(-) diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt index d4ba6a14f96..a320470cf23 100644 --- 
a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt @@ -15,7 +15,6 @@ */ package org.bson.codecs.kotlin.samples -import kotlinx.serialization.Serializable import kotlin.time.Duration import org.bson.BsonDocument import org.bson.BsonMaxKey From d8503c31a29b446ba21dfa2ded8cd38f298e3165 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Tue, 2 Jul 2024 11:26:38 -0600 Subject: [PATCH 36/90] Fix for: Include links to the Evergreen build and to the driver security testing summary in the SSDLC report (#1433) JAVA-5500 --- .evergreen/.evg.yml | 1 - .evergreen/ssdlc-report.sh | 34 ++++++++++++++++++++++++++-------- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 67b27964c41..046a54907f9 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -150,7 +150,6 @@ functions: env: PRODUCT_NAME: ${product_name} PRODUCT_VERSION: ${product_version} - PRODUCT_RELEASE_CREATOR: ${author} EVERGREEN_VERSION_ID: ${version_id} script: .evergreen/ssdlc-report.sh - command: ec2.assume_role diff --git a/.evergreen/ssdlc-report.sh b/.evergreen/ssdlc-report.sh index 574cce48b74..2958edb4327 100755 --- a/.evergreen/ssdlc-report.sh +++ b/.evergreen/ssdlc-report.sh @@ -5,7 +5,6 @@ set -eu # Supported/used environment variables: # PRODUCT_NAME # PRODUCT_VERSION -# PRODUCT_RELEASE_CREATOR # EVERGREEN_VERSION_ID if [ -z "${PRODUCT_NAME}" ]; then @@ -16,10 +15,6 @@ if [ -z "${PRODUCT_VERSION}" ]; then printf "\nPRODUCT_VERSION must be set to a non-empty string\n" exit 1 fi -if [ -z "${PRODUCT_RELEASE_CREATOR}" ]; then - printf "\PRODUCT_RELEASE_CREATOR must be set to a non-empty string\n" - exit 1 -fi if [ -z "${EVERGREEN_VERSION_ID}" ]; then printf "\EVERGREEN_VERSION_ID must be set to a non-empty string\n" exit 1 @@ -34,14 +29,37 @@ source "${RELATIVE_DIR_PATH}/javaConfig.bash" printf "\nCreating SSDLC reports\n" printf "\nProduct name: %s\n" "${PRODUCT_NAME}" printf "\nProduct version: %s\n" "${PRODUCT_VERSION}" -printf "\nProduct release creator: %s\n" "${PRODUCT_RELEASE_CREATOR}" -declare -r EVERGREEN_BUILD_URL="https://spruce.mongodb.com/version/${EVERGREEN_VERSION_ID}" -printf "\nEvergreen build URL: %s\n" "${EVERGREEN_BUILD_URL}" + declare -r SSDLC_PATH="${RELATIVE_DIR_PATH}/../build/ssdlc" declare -r SSDLC_STATIC_ANALYSIS_REPORTS_PATH="${SSDLC_PATH}/static-analysis-reports" mkdir "${SSDLC_PATH}" mkdir "${SSDLC_STATIC_ANALYSIS_REPORTS_PATH}" +declare -r EVERGREEN_PROJECT_NAME_PREFIX="${PRODUCT_NAME//-/_}" +declare -r EVERGREEN_BUILD_URL_PREFIX="https://spruce.mongodb.com/version" +declare -r GIT_TAG="r${PRODUCT_VERSION}" +GIT_COMMIT_HASH="$(git rev-list --ignore-missing -n 1 "${GIT_TAG}")" +set +e + GIT_BRANCH_MASTER="$(git branch -a --contains "${GIT_TAG}" | grep 'master$')" + GIT_BRANCH_PATCH="$(git branch -a --contains "${GIT_TAG}" | grep '\.x$')" +set -e +if [ -n "${GIT_BRANCH_MASTER}" ]; then + declare -r EVERGREEN_BUILD_URL="${EVERGREEN_BUILD_URL_PREFIX}/${EVERGREEN_PROJECT_NAME_PREFIX}_${GIT_COMMIT_HASH}" +elif [ -n "${GIT_BRANCH_PATCH}" ]; then + # strip out the patch version + declare -r EVERGREEN_PROJECT_NAME_SUFFIX="${PRODUCT_VERSION%.*}" + declare -r EVERGREEN_BUILD_URL="${EVERGREEN_BUILD_URL_PREFIX}/${EVERGREEN_PROJECT_NAME_PREFIX}_${EVERGREEN_PROJECT_NAME_SUFFIX}_${GIT_COMMIT_HASH}" +elif [[ "${PRODUCT_NAME}" == *'-snapshot' ]]; then + declare -r EVERGREEN_BUILD_URL="${EVERGREEN_BUILD_URL_PREFIX}/${EVERGREEN_VERSION_ID}" +else + 
printf "\nFailed to compute EVERGREEN_BUILD_URL\n" + exit 1 +fi +printf "\nEvergreen build URL: %s\n" "${EVERGREEN_BUILD_URL}" + +PRODUCT_RELEASE_CREATOR="$(git log --ignore-missing "${GIT_TAG}"^.."${GIT_TAG}" --simplify-by-decoration --pretty='format:%aN')" +printf "\nProduct release creator: %s\n" "${PRODUCT_RELEASE_CREATOR}" + printf "\nCreating SpotBugs SARIF reports\n" ./gradlew -version set +e From 88b4218a840576d7e04d0c5b882250142cc3f7f4 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Mon, 8 Jul 2024 12:42:41 -0400 Subject: [PATCH 37/90] Allow generic base classes for POJOs (#1423) This change fixes a regression which prevents the driver from encoding and decoding concrete classes which extend generic base classes, when the base class is specified as the generic type of the MongoCollection. JAVA-5173 Co-authored-by: Ross Lawley --- .../codecs/pojo/LazyPropertyModelCodec.java | 33 +++++++++-- .../org/bson/codecs/pojo/PojoCodecImpl.java | 18 +++--- .../bson/codecs/pojo/PojoCodecProvider.java | 2 +- .../org/bson/codecs/pojo/PojoCustomTest.java | 16 ++++- .../org/bson/codecs/pojo/PojoTestCase.java | 32 +++++++--- .../bson/codecs/pojo/entities/BaseField.java | 55 +++++++++++++++++ .../codecs/pojo/entities/ConcreteField.java | 27 +++++++++ .../codecs/pojo/entities/ConcreteModel.java | 27 +++++++++ .../pojo/entities/GenericBaseModel.java | 59 +++++++++++++++++++ 9 files changed, 248 insertions(+), 21 deletions(-) create mode 100644 bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java create mode 100644 bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java create mode 100644 bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java create mode 100644 bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java diff --git a/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java b/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java index a502c337bd8..24537ce1d8e 100644 --- a/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java +++ b/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java @@ -163,19 +163,44 @@ private PropertyModel getSpecializedPropertyModel(final PropertyModel static final class NeedSpecializationCodec extends PojoCodec { private final ClassModel classModel; private final DiscriminatorLookup discriminatorLookup; + private final CodecRegistry codecRegistry; - NeedSpecializationCodec(final ClassModel classModel, final DiscriminatorLookup discriminatorLookup) { + NeedSpecializationCodec(final ClassModel classModel, final DiscriminatorLookup discriminatorLookup, final CodecRegistry codecRegistry) { this.classModel = classModel; this.discriminatorLookup = discriminatorLookup; + this.codecRegistry = codecRegistry; } @Override - public T decode(final BsonReader reader, final DecoderContext decoderContext) { - throw exception(); + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + if (value.getClass().equals(classModel.getType())) { + throw exception(); + } + tryEncode(codecRegistry.get(value.getClass()), writer, value, encoderContext); } @Override - public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + return tryDecode(reader, decoderContext); + } + + @SuppressWarnings("unchecked") + private void tryEncode(final Codec codec, final BsonWriter writer, final T value, final EncoderContext encoderContext) { + try { + 
codec.encode(writer, (A) value, encoderContext); + } catch (Exception e) { + throw exception(); + } + } + + @SuppressWarnings("unchecked") + public T tryDecode(final BsonReader reader, final DecoderContext decoderContext) { + Codec codec = PojoCodecImpl.getCodecFromDocument(reader, classModel.useDiscriminator(), classModel.getDiscriminatorKey(), + codecRegistry, discriminatorLookup, null, classModel.getName()); + if (codec != null) { + return codec.decode(reader, decoderContext); + } + throw exception(); } diff --git a/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java b/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java index bccadfb3e0d..96853000198 100644 --- a/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java +++ b/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java @@ -101,7 +101,8 @@ public T decode(final BsonReader reader, final DecoderContext decoderContext) { return instanceCreator.getInstance(); } else { return getCodecFromDocument(reader, classModel.useDiscriminator(), classModel.getDiscriminatorKey(), registry, - discriminatorLookup, this).decode(reader, DecoderContext.builder().checkedDiscriminator(true).build()); + discriminatorLookup, this, classModel.getName()) + .decode(reader, DecoderContext.builder().checkedDiscriminator(true).build()); } } @@ -275,10 +276,11 @@ private boolean areEquivalentTypes(final Class t1, final Class t2) } @SuppressWarnings("unchecked") - private Codec getCodecFromDocument(final BsonReader reader, final boolean useDiscriminator, final String discriminatorKey, - final CodecRegistry registry, final DiscriminatorLookup discriminatorLookup, - final Codec defaultCodec) { - Codec codec = defaultCodec; + @Nullable + static Codec getCodecFromDocument(final BsonReader reader, final boolean useDiscriminator, final String discriminatorKey, + final CodecRegistry registry, final DiscriminatorLookup discriminatorLookup, @Nullable final Codec defaultCodec, + final String simpleClassName) { + Codec codec = defaultCodec; if (useDiscriminator) { BsonReaderMark mark = reader.getMark(); reader.readStartDocument(); @@ -289,12 +291,12 @@ private Codec getCodecFromDocument(final BsonReader reader, final boolean use discriminatorKeyFound = true; try { Class discriminatorClass = discriminatorLookup.lookup(reader.readString()); - if (!codec.getEncoderClass().equals(discriminatorClass)) { - codec = (Codec) registry.get(discriminatorClass); + if (codec == null || !codec.getEncoderClass().equals(discriminatorClass)) { + codec = (Codec) registry.get(discriminatorClass); } } catch (Exception e) { throw new CodecConfigurationException(format("Failed to decode '%s'. Decoding errored with: %s", - classModel.getName(), e.getMessage()), e); + simpleClassName, e.getMessage()), e); } } else { reader.skipValue(); diff --git a/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java index 6a3e8bfc836..b62364b1b4b 100644 --- a/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java +++ b/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java @@ -97,7 +97,7 @@ private static PojoCodec createCodec(final ClassModel classModel, fina final List propertyCodecProviders, final DiscriminatorLookup discriminatorLookup) { return shouldSpecialize(classModel) ? 
new PojoCodecImpl<>(classModel, codecRegistry, propertyCodecProviders, discriminatorLookup) - : new LazyPropertyModelCodec.NeedSpecializationCodec<>(classModel, discriminatorLookup); + : new LazyPropertyModelCodec.NeedSpecializationCodec<>(classModel, discriminatorLookup, codecRegistry); } /** diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java index acb63b04f06..cf8cef50282 100644 --- a/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java +++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java @@ -38,11 +38,14 @@ import org.bson.codecs.pojo.entities.BsonRepresentationUnsupportedString; import org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel; import org.bson.codecs.pojo.entities.ConcreteCollectionsModel; +import org.bson.codecs.pojo.entities.ConcreteModel; +import org.bson.codecs.pojo.entities.ConcreteField; import org.bson.codecs.pojo.entities.ConcreteStandAloneAbstractInterfaceModel; import org.bson.codecs.pojo.entities.ConstructorNotPublicModel; import org.bson.codecs.pojo.entities.ConventionModel; import org.bson.codecs.pojo.entities.ConverterModel; import org.bson.codecs.pojo.entities.CustomPropertyCodecOptionalModel; +import org.bson.codecs.pojo.entities.GenericBaseModel; import org.bson.codecs.pojo.entities.GenericHolderModel; import org.bson.codecs.pojo.entities.GenericTreeModel; import org.bson.codecs.pojo.entities.InterfaceBasedModel; @@ -545,6 +548,17 @@ public void testInvalidDiscriminatorInNestedModel() { + "'simple': {'_t': 'FakeModel', 'integerField': 42, 'stringField': 'myString'}}")); } + @Test + public void testGenericBaseClass() { + CodecRegistry registry = fromProviders(new ValueCodecProvider(), PojoCodecProvider.builder().automatic(true).build()); + + ConcreteModel model = new ConcreteModel(new ConcreteField("name1")); + + String json = "{\"_t\": \"org.bson.codecs.pojo.entities.ConcreteModel\", \"field\": {\"name\": \"name1\"}}"; + roundTrip(PojoCodecProvider.builder().automatic(true), GenericBaseModel.class, model, json); + } + + @Test public void testCannotEncodeUnspecializedClasses() { CodecRegistry registry = fromProviders(getPojoCodecProviderBuilder(GenericTreeModel.class).build()); @@ -553,7 +567,7 @@ public void testCannotEncodeUnspecializedClasses() { } @Test - public void testCannotDecodeUnspecializedClasses() { + public void testCannotDecodeUnspecializedClassesWithoutADiscriminator() { assertThrows(CodecConfigurationException.class, () -> decodingShouldFail(getCodec(GenericTreeModel.class), "{'field1': 'top', 'field2': 1, " diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java index 5b5209435cb..eb380bb7986 100644 --- a/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java +++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java @@ -90,8 +90,12 @@ void roundTrip(final T value, final String json) { } void roundTrip(final PojoCodecProvider.Builder builder, final T value, final String json) { - encodesTo(getCodecRegistry(builder), value, json); - decodesTo(getCodecRegistry(builder), json, value); + roundTrip(builder, value.getClass(), value, json); + } + + void roundTrip(final PojoCodecProvider.Builder builder, final Class clazz, final T value, final String json) { + encodesTo(getCodecRegistry(builder), clazz, value, json); + decodesTo(getCodecRegistry(builder), clazz, json, value); } void threadedRoundTrip(final PojoCodecProvider.Builder builder, final T 
value, final String json) { @@ -109,21 +113,30 @@ void roundTrip(final CodecRegistry registry, final T value, final String jso decodesTo(registry, json, value); } + void roundTrip(final CodecRegistry registry, final Class clazz, final T value, final String json) { + encodesTo(registry, clazz, value, json); + decodesTo(registry, clazz, json, value); + } + void encodesTo(final PojoCodecProvider.Builder builder, final T value, final String json) { encodesTo(builder, value, json, false); } void encodesTo(final PojoCodecProvider.Builder builder, final T value, final String json, final boolean collectible) { - encodesTo(getCodecRegistry(builder), value, json, collectible); + encodesTo(getCodecRegistry(builder), value.getClass(), value, json, collectible); } void encodesTo(final CodecRegistry registry, final T value, final String json) { - encodesTo(registry, value, json, false); + encodesTo(registry, value.getClass(), value, json, false); + } + + void encodesTo(final CodecRegistry registry, final Class clazz, final T value, final String json) { + encodesTo(registry, clazz, value, json, false); } @SuppressWarnings("unchecked") - void encodesTo(final CodecRegistry registry, final T value, final String json, final boolean collectible) { - Codec codec = (Codec) registry.get(value.getClass()); + void encodesTo(final CodecRegistry registry, final Class clazz, final T value, final String json, final boolean collectible) { + Codec codec = (Codec) registry.get(clazz); encodesTo(codec, value, json, collectible); } @@ -144,7 +157,12 @@ void decodesTo(final PojoCodecProvider.Builder builder, final String json, f @SuppressWarnings("unchecked") void decodesTo(final CodecRegistry registry, final String json, final T expected) { - Codec codec = (Codec) registry.get(expected.getClass()); + decodesTo(registry, expected.getClass(), json, expected); + } + + @SuppressWarnings("unchecked") + void decodesTo(final CodecRegistry registry, final Class clazz, final String json, final T expected) { + Codec codec = (Codec) registry.get(clazz); decodesTo(codec, json, expected); } diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java new file mode 100644 index 00000000000..4393c5f2d7f --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public abstract class BaseField { + private String name; + + public BaseField(final String name) { + this.name = name; + } + + protected BaseField() { + } + + public String getName() { + return name; + } + + public void setName(final String name) { + this.name = name; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BaseField baseField = (BaseField) o; + return Objects.equals(name, baseField.name); + } + + @Override + public int hashCode() { + return Objects.hashCode(name); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java new file mode 100644 index 00000000000..6fb06a70de9 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public class ConcreteField extends BaseField { + + public ConcreteField() { + } + + public ConcreteField(final String name) { + super(name); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java new file mode 100644 index 00000000000..cd406fa1392 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public class ConcreteModel extends GenericBaseModel { + + public ConcreteModel() { + } + + public ConcreteModel(final ConcreteField field) { + super(field); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java new file mode 100644 index 00000000000..5164f9703e5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
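[Editorial sketch, not part of the patch] The new test entities (BaseField, ConcreteField, ConcreteModel, and the GenericBaseModel class whose header continues below) exercise decoding through a codec for a generic base class that cannot be specialized up front, where the concrete type is recovered from the "_t" discriminator written because of @BsonDiscriminator. The following standalone Java sketch mirrors the JSON used by testGenericBaseClass above; it assumes these test entities are on the classpath and that the PojoCodecProvider includes this patch's discriminator-based fallback. Class name and main method are illustrative only.

import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.ConcreteField;
import org.bson.codecs.pojo.entities.ConcreteModel;
import org.bson.codecs.pojo.entities.GenericBaseModel;

import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

public final class DiscriminatorDecodeSketch {

    @SuppressWarnings("rawtypes")
    public static void main(final String[] args) {
        CodecRegistry registry = fromProviders(
                new ValueCodecProvider(),
                PojoCodecProvider.builder().automatic(true).build());

        // GenericBaseModel has an unresolved type parameter, so the provider can only
        // hand back an unspecialized codec for it; decoding therefore relies on the
        // "_t" discriminator in the document to look up the concrete codec.
        Codec<GenericBaseModel> baseCodec = registry.get(GenericBaseModel.class);

        BsonDocument document = BsonDocument.parse(
                "{'_t': 'org.bson.codecs.pojo.entities.ConcreteModel', 'field': {'name': 'name1'}}");
        GenericBaseModel decoded = baseCodec.decode(
                new BsonDocumentReader(document), DecoderContext.builder().build());

        // Prints true: the discriminator resolved ConcreteModel, including its field value.
        System.out.println(decoded.equals(new ConcreteModel(new ConcreteField("name1"))));
    }
}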
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +import java.util.Objects; + +@BsonDiscriminator() +public class GenericBaseModel { + + private T field; + + public GenericBaseModel(final T field) { + this.field = field; + } + + public GenericBaseModel() { + } + + public T getField() { + return field; + } + + public void setField(final T field) { + this.field = field; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GenericBaseModel that = (GenericBaseModel) o; + return Objects.equals(field, that.field); + } + + @Override + public int hashCode() { + return Objects.hashCode(field); + } +} From 85c316589f0ba55de04e8374b700434d1be99fb0 Mon Sep 17 00:00:00 2001 From: Andreas Braun Date: Tue, 9 Jul 2024 22:17:36 +0200 Subject: [PATCH 38/90] Attribute release commit and tag to user triggering the workflow (#1437) --- .github/workflows/release.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0832b079955..3136189bc27 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,10 +9,6 @@ on: required: true type: "string" -env: - GIT_AUTHOR_EMAIL: "167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com" - GIT_AUTHOR_NAME: "mongodb-dbx-release-bot[bot]" - jobs: prepare-release: environment: release @@ -72,10 +68,13 @@ jobs: echo '🆕 Creating new release branch ${RELEASE_BRANCH} from ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY git checkout -b ${RELEASE_BRANCH} + # Set commit author information to the user that triggered the release workflow - name: "Set git author information" run: | - git config user.name "${GIT_AUTHOR_NAME}" - git config user.email "${GIT_AUTHOR_EMAIL}" + GITHUB_USER_NAME=$(gh api users/${{ github.actor }} --jq '.name') + GITHUB_USER_ID=$(gh api users/${{ github.actor }} --jq '.id') + git config user.name "${GITHUB_USER_NAME}}" + git config user.email "${GITHUB_USER_ID}+${{ github.actor }}@users.noreply.github.com" # This step bumps version numbers in build.gradle and creates git artifacts for the release - name: "Bump version numbers and create release tag" From e228972fe98f428ef1ff03ef5543d307071a8075 Mon Sep 17 00:00:00 2001 From: Andreas Braun Date: Tue, 9 Jul 2024 22:46:07 +0200 Subject: [PATCH 39/90] Support pre-releases in release tooling (#1425) --- .github/workflows/bump-and-tag.sh | 22 +++++---- .github/workflows/bump-version.sh | 13 ++++++ .github/workflows/release.yml | 74 +++++++++++++++++++++++-------- 3 files changed, 82 insertions(+), 27 deletions(-) create mode 100755 .github/workflows/bump-version.sh diff --git a/.github/workflows/bump-and-tag.sh b/.github/workflows/bump-and-tag.sh index 84b7567427c..9e735586e91 100755 --- a/.github/workflows/bump-and-tag.sh +++ b/.github/workflows/bump-and-tag.sh @@ -1,18 +1,22 @@ #!/usr/bin/env bash set -e -# This script assumes that release X.Y.Z will always be created from X.Y.Z-SNAPSHOT" -echo 
"Replace snapshot version with release version ${RELEASE_VERSION} in build.gradle" -sed --in-place "s/version = '.*-SNAPSHOT'/version = '${RELEASE_VERSION}'/g" build.gradle +if [ "$#" -ne 3 ]; then + echo "Usage: $0 " >&2 + exit 1 +fi -echo "Create package commit for ${RELEASE_VERSION}" -git commit -m "Version: bump ${RELEASE_VERSION}" build.gradle +CURRENT_VERSION=$1 +RELEASE_VERSION=$2 +NEXT_VERSION=$3 + +SCRIPT_DIR=$(dirname ${BASH_SOURCE[0]}) + +echo "Bump version in build.gradle to ${RELEASE_VERSION}" +${SCRIPT_DIR}/bump-version.sh "${RELEASE_VERSION_WITHOUT_SUFFIX}-SNAPSHOT" "${RELEASE_VERSION}" echo "Create release tag for ${RELEASE_VERSION}" git tag -a -m "${RELEASE_VERSION}" r${RELEASE_VERSION} echo "Bump to snapshot version for ${NEXT_VERSION}" -sed --in-place "s/version = '${RELEASE_VERSION}'/version = '${NEXT_VERSION}-SNAPSHOT'/g" build.gradle - -echo "Create commit for version bump to ${NEXT_VERSION}" -git commit -m "Version: bump ${NEXT_VERSION}-SNAPSHOT" build.gradle +${SCRIPT_DIR}/bump-version.sh "${RELEASE_VERSION}" "${NEXT_VERSION}-SNAPSHOT" diff --git a/.github/workflows/bump-version.sh b/.github/workflows/bump-version.sh new file mode 100755 index 00000000000..5f39df82d79 --- /dev/null +++ b/.github/workflows/bump-version.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -e + +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +FROM_VERSION=$1 +TO_VERSION=$2 + +sed --in-place "s/version = '${FROM_VERSION}'/version = '${TO_VERSION}'/g" build.gradle +git commit -m "Version: bump ${TO_VERSION}" build.gradle diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3136189bc27..4724227e0ff 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,63 +32,101 @@ jobs: - name: "Store version numbers in env variables" # The awk command to increase the version number was copied from # StackOverflow: https://stackoverflow.com/a/61921674/3959933 + # Variables set here: + # RELEASE_VERSION: The version the deployment is expected to create + # RELEASE_VERSION_WITHOUT_SUFFIX: The version without any stability + # suffixes. Example: 5.2.0-beta0 => 5.2.0 + # NEXT_VERSION: The next version to be released. For pre-releases, the + # next version is a snapshot of the pre-release version. Examples: + # 5.2.0 => 5.2.1; 5.2.0-beta0 => 5.2.0 + # RELEASE_BRANCH: The name of the stable branch for this release series + # Example: 5.2.0 => 5.2.x + # Example: 5.2.0-beta1 => run: | echo RELEASE_VERSION=${{ inputs.version }} >> $GITHUB_ENV - echo NEXT_VERSION=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF += 1 ; print}') >> $GITHUB_ENV - echo RELEASE_BRANCH=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF = "x" ; print}') >> $GITHUB_ENV + echo RELEASE_VERSION_WITHOUT_SUFFIX=$(echo ${{ inputs.version }} | awk -F- '{print $1}') >> $GITHUB_ENV + if [[ "${{ inputs.version }}" =~ (alpha|beta|rc)[0-9]+$ ]]; then + echo NEXT_VERSION=$(echo ${{ inputs.version }} | awk -F- '{print $1}') >> $GITHUB_ENV + echo RELEASE_BRANCH=${{ github.ref_name }} >> $GITHUB_ENV + else + echo NEXT_VERSION=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF += 1 ; print}') >> $GITHUB_ENV + echo RELEASE_BRANCH=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF = "x" ; print}') >> $GITHUB_ENV + fi + + - name: "Ensure current snapshot version matches release version" + run: | + grep -q "version = '${{ env.RELEASE_VERSION_WITHOUT_SUFFIX }}-SNAPSHOT'" build.gradle + if [[ $? 
!= 0 ]]; then + echo '❌ Release failed: version in build.gradle is not a snapshot for release version ${{ inputs.version }}' >> $GITHUB_STEP_SUMMARY + exit 1 + fi - name: "Ensure release tag does not already exist" run: | - if [[ $(git tag -l r${RELEASE_VERSION}) == r${RELEASE_VERSION} ]]; then + if [[ $(git tag -l r${{ env.RELEASE_VERSION }}) == r${{ env.RELEASE_VERSION }} ]]; then echo '❌ Release failed: tag for version ${{ inputs.version }} already exists' >> $GITHUB_STEP_SUMMARY exit 1 fi # For patch releases (A.B.C where C != 0), we expect the release to be - # triggered from the A.B.x maintenance branch + # triggered from the A.B.x maintenance branch. We use the release version + # without suffixes to avoid mistakes when making pre-releases - name: "Fail if patch release is created from wrong release branch" - if: ${{ !endsWith(inputs.version, '.0') && env.RELEASE_BRANCH != github.ref_name }} + if: ${{ !endsWith(env.RELEASE_VERSION_WITHOUT_SUFFIX, '.0') && env.RELEASE_BRANCH != github.ref_name }} run: | echo '❌ Release failed due to branch mismatch: expected ${{ inputs.version }} to be released from ${{ env.RELEASE_BRANCH }}, got ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY exit 1 # For non-patch releases (A.B.C where C == 0), we expect the release to - # be triggered from master or the A.B.x maintenance branch + # be triggered from master or the A.B.x maintenance branch. This includes + # pre-releases for any non-patch releases, e.g. 5.2.0-beta1 - name: "Fail if non-patch release is created from wrong release branch" - if: ${{ endsWith(inputs.version, '.0') && env.RELEASE_BRANCH != github.ref_name && github.ref_name != 'master' }} + if: ${{ endsWith(env.RELEASE_VERSION_WITHOUT_SUFFIX, '.0') && env.RELEASE_BRANCH != github.ref_name && github.ref_name != 'master' }} run: | echo '❌ Release failed due to branch mismatch: expected ${{ inputs.version }} to be released from ${{ env.RELEASE_BRANCH }} or master, got ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY exit 1 - # If a non-patch release is created from a branch other than its - # maintenance branch, create that branch from the current one and push it - - name: "Create new release branch for non-patch release" - if: ${{ endsWith(inputs.version, '.0') && env.RELEASE_BRANCH != github.ref_name }} - run: | - echo '🆕 Creating new release branch ${RELEASE_BRANCH} from ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY - git checkout -b ${RELEASE_BRANCH} - # Set commit author information to the user that triggered the release workflow - name: "Set git author information" run: | GITHUB_USER_NAME=$(gh api users/${{ github.actor }} --jq '.name') GITHUB_USER_ID=$(gh api users/${{ github.actor }} --jq '.id') - git config user.name "${GITHUB_USER_NAME}}" + git config user.name "${GITHUB_USER_NAME}" git config user.email "${GITHUB_USER_ID}+${{ github.actor }}@users.noreply.github.com" + # If a non-patch release is created from a branch other than its + # maintenance branch, create that branch from the current one and push it + # Pre-releases don't have this behaviour, so we can check the full release + # version including stability suffixes to exclude those + - name: "Create new release branch for non-patch release" + if: ${{ endsWith(env.RELEASE_VERSION, '.0') && env.RELEASE_BRANCH != github.ref_name }} + run: | + echo '🆕 Creating new release branch ${{ env.RELEASE_BRANCH }} from ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + git checkout -b ${{ env.RELEASE_BRANCH }} + NEXT_MINOR_VERSION=$(echo "${{ env.RELEASE_VERSION }}" | awk -F. -v OFS=. 
'{$2 += 1 ; $NF = 0 ; print}') + echo "➡️ Bumping version for ${{ github.ref_name }} branch to ${NEXT_MINOR_VERSION}" >> $GITHUB_STEP_SUMMARY + git checkout ${{ github.ref_name }} + .github/workflows/bump-version.sh "${{ env.RELEASE_VERSION_WITHOUT_SUFFIX }}-SNAPSHOT" "${NEXT_MINOR_VERSION}-SNAPSHOT" + git push origin ${{ github.ref_name }} + git checkout ${{ env.RELEASE_BRANCH }} + # This step bumps version numbers in build.gradle and creates git artifacts for the release - name: "Bump version numbers and create release tag" - run: .github/workflows/bump-and-tag.sh + run: .github/workflows/bump-and-tag.sh "${{ env.RELEASE_VERSION_WITHOUT_SUFFIX }}" "${{ env.RELEASE_VERSION }}" "${{ env.NEXT_VERSION }}" - name: "Push release branch and tag" run: | - git push origin ${RELEASE_BRANCH} + git push origin ${{ env.RELEASE_BRANCH }} git push origin r${{ env.RELEASE_VERSION }} - name: "Create draft release with generated changelog" run: | + if [[ "${{ inputs.version }}" =~ (alpha|beta|rc) ]]; then + PRERELEASE="--prerelease --latest=false" + fi echo "RELEASE_URL=$(\ gh release create r${RELEASE_VERSION} \ + ${PRERELEASE} \ --target ${{ env.RELEASE_BRANCH }} \ --title "Java Driver ${{ env.RELEASE_VERSION }} ($(date '+%B %d, %Y'))" \ --generate-notes \ From 42fec75842a723549bbcf3a11aa62f8a3847dd42 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Wed, 10 Jul 2024 07:33:31 -0400 Subject: [PATCH 40/90] Bump maxWireVersion for MongoDB 8.0 (#1442) JAVA-5511 --- .../src/main/com/mongodb/connection/ServerDescription.java | 2 +- .../internal/operation/OperationUnitSpecification.groovy | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/driver-core/src/main/com/mongodb/connection/ServerDescription.java b/driver-core/src/main/com/mongodb/connection/ServerDescription.java index 1bf0a037924..d97e848c163 100644 --- a/driver-core/src/main/com/mongodb/connection/ServerDescription.java +++ b/driver-core/src/main/com/mongodb/connection/ServerDescription.java @@ -63,7 +63,7 @@ public class ServerDescription { * The maximum supported driver wire version * @since 3.8 */ - public static final int MAX_DRIVER_WIRE_VERSION = 21; + public static final int MAX_DRIVER_WIRE_VERSION = 25; private static final int DEFAULT_MAX_DOCUMENT_SIZE = 0x1000000; // 16MB diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy index 9ce1e4605e7..01ad72455fb 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy @@ -63,6 +63,7 @@ class OperationUnitSpecification extends Specification { [6, 2]: 19, [6, 3]: 20, [7, 0]: 21, + [9, 0]: 25, ] static int getMaxWireVersionForServerVersion(List serverVersion) { From 722d9839da6b9ffad2547c9879ad5af13e1419aa Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Fri, 12 Jul 2024 10:49:15 -0600 Subject: [PATCH 41/90] Migrate `com.mongodb.client.unified.UnifiedTest` and all its descendants/users to JUnit 5 (#1410) JAVA-5495 --- build.gradle | 2 +- .../client/coroutine/UnifiedCrudTest.kt | 23 +-- .../kotlin/client/coroutine/UnifiedTest.kt | 11 +- .../mongodb/kotlin/client/UnifiedCrudTest.kt | 23 +-- .../com/mongodb/kotlin/client/UnifiedTest.kt | 11 +- .../client/unified/ChangeStreamsTest.java | 43 +++-- .../unified/ClientSideEncryptionTest.java | 19 +- .../unified/CollectionManagementTest.java | 19 +- 
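[Editorial sketch, not part of the patch] Background on the JUnit 5 migration this patch performs across the unified tests: JUnit 4's @RunWith(Parameterized.class) injected test parameters through the constructor, whereas JUnit Jupiter injects them per invocation into a @ParameterizedTest method fed by an @MethodSource factory returning Arguments. That is why the constructors in the diffs below are replaced by data() factories plus overridable setUp/skips hooks. A minimal, self-contained Java sketch of that general pattern follows; the class, method names, and sample data here are hypothetical, not the driver's.

import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;

class ParameterizedMigrationSketch {

    // Replaces the JUnit 4 @Parameterized.Parameters factory; each Arguments row
    // becomes one invocation of the @ParameterizedTest method below.
    static Collection<Arguments> data() {
        return Arrays.asList(
                Arguments.of("crud-file", "insertOne succeeds"),
                Arguments.of("crud-file", "unsupported operation"));
    }

    // Parameters arrive per invocation instead of through a constructor, so logic
    // that used to run in the constructor (for example skipping certain tests)
    // moves into the test body or an overridable hook invoked from it.
    @ParameterizedTest(name = "{0}: {1}")
    @MethodSource("data")
    void runTest(final String fileDescription, final String testDescription) {
        assumeFalse(testDescription.contains("unsupported"));
        assertTrue(fileDescription.startsWith("crud"));
    }
}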
.../client/unified/CommandLoggingTest.java | 19 +- .../client/unified/CommandMonitoringTest.java | 19 +- .../unified/ConnectionPoolLoggingTest.java | 22 +-- .../client/unified/IndexManagmentTest.java | 19 +- .../client/unified/LoadBalancerTest.java | 41 +++-- .../unified/ServerSelectionLoggingTest.java | 17 +- .../client/unified/SessionsTest.java | 16 +- .../client/unified/UnifiedCrudTest.java | 20 +-- .../client/unified/UnifiedGridFSTest.java | 18 +- .../unified/UnifiedReactiveStreamsTest.java | 12 +- .../unified/UnifiedRetryableReadsTest.java | 43 +++-- .../unified/UnifiedRetryableWritesTest.java | 20 +-- ...ifiedServerDiscoveryAndMonitoringTest.java | 25 +-- .../unified/UnifiedTransactionsTest.java | 18 +- .../unified/UnifiedWriteConcernTest.java | 15 +- .../client/unified/VersionedApiTest.java | 17 +- .../client/unified/ChangeStreamsTest.java | 17 +- .../unified/ClientSideEncryptionTest.java | 19 +- .../unified/CollectionManagementTest.java | 19 +- .../client/unified/CommandLoggingTest.java | 22 +-- .../client/unified/CommandMonitoringTest.java | 22 +-- .../unified/ConnectionPoolLoggingTest.java | 21 +-- .../client/unified/IndexManagmentTest.java | 19 +- .../client/unified/LoadBalancerTest.java | 18 +- .../unified/ServerSelectionLoggingTest.java | 17 +- .../mongodb/client/unified/SessionsTest.java | 17 +- .../unified/UnifiedAtlasDataLakeTest.java | 20 +-- .../client/unified/UnifiedAuthTest.java | 16 +- .../client/unified/UnifiedCrudTest.java | 31 ++-- .../client/unified/UnifiedGridFSTest.java | 22 +-- .../unified/UnifiedRetryableReadsTest.java | 20 +-- .../unified/UnifiedRetryableWritesTest.java | 22 +-- ...ifiedServerDiscoveryAndMonitoringTest.java | 36 ++-- .../client/unified/UnifiedSyncTest.java | 12 +- .../mongodb/client/unified/UnifiedTest.java | 168 +++++++++++------- .../unified/UnifiedTestFailureValidator.java | 66 ++++--- .../client/unified/UnifiedTestValidator.java | 31 +--- .../unified/UnifiedTransactionsTest.java | 18 +- .../unified/UnifiedWriteConcernTest.java | 15 +- .../client/unified/VersionedApiTest.java | 18 +- ...WithTransactionHelperTransactionsTest.java | 16 +- .../mongodb/workload/WorkloadExecutor.java | 28 ++- 50 files changed, 468 insertions(+), 754 deletions(-) diff --git a/build.gradle b/build.gradle index 59745250539..693b514b738 100644 --- a/build.gradle +++ b/build.gradle @@ -57,7 +57,7 @@ ext { awsSdkV1Version = '1.12.337' mongoCryptVersion = '1.10.0-SNAPSHOT' projectReactorVersion = '2022.0.0' - junitBomVersion = '5.8.2' + junitBomVersion = '5.10.2' logbackVersion = '1.3.14' gitVersion = getGitVersion() } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt index 15e3a9b7b65..47e1ea6a781 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt @@ -15,32 +15,21 @@ */ package com.mongodb.kotlin.client.coroutine -import com.mongodb.client.unified.UnifiedCrudTest.customSkips +import com.mongodb.client.unified.UnifiedCrudTest.doSkips import java.io.IOException import java.net.URISyntaxException -import org.bson.BsonArray -import org.bson.BsonDocument -import org.junit.runners.Parameterized +import org.junit.jupiter.params.provider.Arguments -internal class UnifiedCrudTest( - fileDescription: String?, - testDescription: String, - 
schemaVersion: String, - runOnRequirements: BsonArray?, - entitiesArray: BsonArray, - initialData: BsonArray, - definition: BsonDocument -) : UnifiedTest(fileDescription, schemaVersion, runOnRequirements, entitiesArray, initialData, definition) { +internal class UnifiedCrudTest() : UnifiedTest() { - init { - customSkips(fileDescription, testDescription) + override fun skips(fileDescription: String, testDescription: String) { + doSkips(fileDescription, testDescription) } companion object { @JvmStatic - @Parameterized.Parameters(name = "{0}: {1}") @Throws(URISyntaxException::class, IOException::class) - fun data(): Collection?>? { + fun data(): Collection? { return getTestData("unified-test-format/crud") } } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt index b8eb32da0f5..b027b3946c5 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt @@ -23,17 +23,8 @@ import com.mongodb.client.gridfs.GridFSBucket import com.mongodb.client.unified.UnifiedTest as JUnifiedTest import com.mongodb.client.vault.ClientEncryption import com.mongodb.kotlin.client.coroutine.syncadapter.SyncMongoClient -import org.bson.BsonArray -import org.bson.BsonDocument -internal abstract class UnifiedTest( - fileDescription: String?, - schemaVersion: String, - runOnRequirements: BsonArray?, - entitiesArray: BsonArray, - initialData: BsonArray, - definition: BsonDocument -) : JUnifiedTest(fileDescription, schemaVersion, runOnRequirements, entitiesArray, initialData, definition) { +internal abstract class UnifiedTest() : JUnifiedTest() { override fun createMongoClient(settings: MongoClientSettings): JMongoClient = SyncMongoClient(MongoClient.create(settings)) diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt index 143d8410479..55d77d42e7b 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt @@ -15,32 +15,21 @@ */ package com.mongodb.kotlin.client -import com.mongodb.client.unified.UnifiedCrudTest.customSkips +import com.mongodb.client.unified.UnifiedCrudTest.doSkips import java.io.IOException import java.net.URISyntaxException -import org.bson.BsonArray -import org.bson.BsonDocument -import org.junit.runners.Parameterized +import org.junit.jupiter.params.provider.Arguments -internal class UnifiedCrudTest( - fileDescription: String?, - testDescription: String, - schemaVersion: String, - runOnRequirements: BsonArray?, - entitiesArray: BsonArray, - initialData: BsonArray, - definition: BsonDocument -) : UnifiedTest(fileDescription, schemaVersion, runOnRequirements, entitiesArray, initialData, definition) { +internal class UnifiedCrudTest() : UnifiedTest() { - init { - customSkips(fileDescription, testDescription) + override fun skips(fileDescription: String, testDescription: String) { + doSkips(fileDescription, testDescription) } companion object { @JvmStatic - @Parameterized.Parameters(name = "{0}: {1}") @Throws(URISyntaxException::class, IOException::class) - fun data(): Collection?>? { + fun data(): Collection? 
{ return getTestData("unified-test-format/crud") } } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt index 99a56849d7c..4f4726bbb6f 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt @@ -23,17 +23,8 @@ import com.mongodb.client.gridfs.GridFSBucket import com.mongodb.client.unified.UnifiedTest as JUnifiedTest import com.mongodb.client.vault.ClientEncryption import com.mongodb.kotlin.client.syncadapter.SyncMongoClient -import org.bson.BsonArray -import org.bson.BsonDocument -internal abstract class UnifiedTest( - fileDescription: String?, - schemaVersion: String, - runOnRequirements: BsonArray?, - entitiesArray: BsonArray, - initialData: BsonArray, - definition: BsonDocument -) : JUnifiedTest(fileDescription, schemaVersion, runOnRequirements, entitiesArray, initialData, definition) { +internal abstract class UnifiedTest() : JUnifiedTest() { override fun createMongoClient(settings: MongoClientSettings): JMongoClient = SyncMongoClient(MongoClient.create(settings)) diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java index f1b3c435b4b..5a48dc343af 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java @@ -19,8 +19,9 @@ import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; -import org.junit.After; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; @@ -32,9 +33,9 @@ import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableWaitForBatchCursorCreation; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorOpen; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableWaitForBatchCursorCreation; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public final class ChangeStreamsTest extends UnifiedReactiveStreamsTest { +final class ChangeStreamsTest extends UnifiedReactiveStreamsTest { private static final List ERROR_REQUIRED_FROM_CHANGE_STREAM_INITIALIZATION_TESTS = Arrays.asList( @@ -54,32 +55,44 @@ public final class ChangeStreamsTest extends UnifiedReactiveStreamsTest { + "but instead depend on a server error" ); - - public ChangeStreamsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(ERROR_REQUIRED_FROM_CHANGE_STREAM_INITIALIZATION_TESTS.contains(testDescription)); 
assumeFalse(EVENT_SENSITIVE_TESTS.contains(testDescription)); + } + @BeforeEach + @Override + public void setUp(@Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + super.setUp( + fileDescription, + testDescription, + schemaVersion, + runOnRequirements, + entitiesArray, + initialData, + definition); enableSleepAfterCursorOpen(256); - if (REQUIRES_BATCH_CURSOR_CREATION_WAITING.contains(testDescription)) { enableWaitForBatchCursorCreation(); } } - @After + @AfterEach + @Override public void cleanUp() { super.cleanUp(); disableSleep(); disableWaitForBatchCursorCreation(); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/change-streams"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java index ae176a66142..8169a300e0e 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java @@ -16,27 +16,14 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class ClientSideEncryptionTest extends UnifiedReactiveStreamsTest { - public ClientSideEncryptionTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class ClientSideEncryptionTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/client-side-encryption"); } - } - diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java index 9cac1ed1492..593bcc7fd9e 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java @@ -16,28 +16,21 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import 
java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class CollectionManagementTest extends UnifiedReactiveStreamsTest { - public CollectionManagementTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class CollectionManagementTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(testDescription.equals("modifyCollection to changeStreamPreAndPostImages enabled")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/collection-management"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java index 5b794b1d636..eed17b8ec33 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java @@ -16,30 +16,23 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class CommandLoggingTest extends UnifiedReactiveStreamsTest { - public CommandLoggingTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class CommandLoggingTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { // The driver has a hack where getLastError command is executed as part of the handshake in order to get a connectionId // even when the hello command response doesn't contain it. 
assumeFalse(fileDescription.equals("pre-42-server-connection-id")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/command-logging"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java index 0208293be93..c47777d1709 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java @@ -16,30 +16,23 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class CommandMonitoringTest extends UnifiedReactiveStreamsTest { - public CommandMonitoringTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class CommandMonitoringTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { // The driver has a hack where getLastError command is executed as part of the handshake in order to get a connectionId // even when the hello command response doesn't contain it. 
assumeFalse(fileDescription.equals("pre-42-server-connection-id")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/command-monitoring"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java index 65d9bd4d1fa..5a6ee9474c1 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java @@ -16,33 +16,23 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class ConnectionPoolLoggingTest extends UnifiedReactiveStreamsTest { - - - public ConnectionPoolLoggingTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class ConnectionPoolLoggingTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { // The implementation of the functionality related to clearing the connection pool before closing the connection // will be carried out once the specification is finalized and ready. 
assumeFalse(testDescription.equals("Connection checkout fails due to error establishing connection")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/connection-monitoring-and-pooling/logging"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagmentTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagmentTest.java index 3cb29d47aeb..931a53dba40 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagmentTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagmentTest.java @@ -16,27 +16,14 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class IndexManagmentTest extends UnifiedReactiveStreamsTest { - - public IndexManagmentTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class IndexManagmentTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/index-management"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java index 1438c194c4a..ff57a6afff1 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java @@ -19,8 +19,9 @@ import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; -import org.junit.After; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; @@ -31,9 +32,9 @@ import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableSleep; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorClose; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorOpen; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class LoadBalancerTest extends UnifiedReactiveStreamsTest { +final class LoadBalancerTest extends UnifiedReactiveStreamsTest { private static final List CURSOR_OPEN_TIMING_SENSITIVE_TESTS = 
Arrays.asList( @@ -52,11 +53,8 @@ public class LoadBalancerTest extends UnifiedReactiveStreamsTest { "pinned connections are returned after a network error during a killCursors request", "a connection can be shared by a transaction and a cursor"); - public LoadBalancerTest(final String fileDescription, - final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(fileDescription, schemaVersion, runOnRequirements, entities, initialData, definition); + @Override + protected void skips(final String fileDescription, final String testDescription) { // Reactive streams driver can't implement these tests because the underlying cursor is closed on error, which // breaks assumption in the tests that closing the cursor is something that happens under user control assumeFalse(testDescription.equals("pinned connections are not returned after an network error during getMore")); @@ -64,7 +62,26 @@ public LoadBalancerTest(final String fileDescription, // Reactive streams driver can't implement this test because there is no way to tell that a change stream cursor // that has not yet received any results has even initiated the change stream assumeFalse(testDescription.equals("change streams pin to a connection")); + } + @Override + @BeforeEach + public void setUp( + @Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + super.setUp( + fileDescription, + testDescription, + schemaVersion, + runOnRequirements, + entitiesArray, + initialData, + definition); if (CURSOR_OPEN_TIMING_SENSITIVE_TESTS.contains(testDescription)) { enableSleepAfterCursorOpen(256); } @@ -74,14 +91,14 @@ public LoadBalancerTest(final String fileDescription, } } - @After + @Override + @AfterEach public void cleanUp() { super.cleanUp(); disableSleep(); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/load-balancers"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java index 433329def96..d78522fb75c 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java @@ -16,25 +16,14 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public final class ServerSelectionLoggingTest extends UnifiedReactiveStreamsTest { - public ServerSelectionLoggingTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, 
final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class ServerSelectionLoggingTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/server-selection/logging"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java index e07856e8daf..81cd47637a0 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java @@ -16,24 +16,14 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class SessionsTest extends UnifiedReactiveStreamsTest { - public SessionsTest(@SuppressWarnings("unused") final String fileDescription, @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class SessionsTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/sessions"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java index ea1ad44d6fd..e3154d351aa 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java @@ -16,27 +16,21 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static com.mongodb.client.unified.UnifiedCrudTest.customSkips; +import static com.mongodb.client.unified.UnifiedCrudTest.doSkips; -public class UnifiedCrudTest extends UnifiedReactiveStreamsTest { - public UnifiedCrudTest(final String fileDescription, final String testDescription, final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - customSkips(fileDescription, 
testDescription); +final class UnifiedCrudTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(fileDescription, testDescription); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/crud"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java index 0df4df51f8c..6a8eba3e96c 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java @@ -16,30 +16,24 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedGridFSTest extends UnifiedReactiveStreamsTest { - public UnifiedGridFSTest(@SuppressWarnings("unused") final String fileDescription, final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class UnifiedGridFSTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { // contentType is deprecated in GridFS spec, and 4.x Java driver no longer support it, so skipping this test assumeFalse(testDescription.equals("upload when contentType is provided")); // Re-enable when JAVA-4214 is fixed assumeFalse(testDescription.equals("delete when files entry does not exist and there are orphaned chunks")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/gridfs"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java index d4f079bd410..d0c0844bbc8 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java @@ -23,7 +23,6 @@ import com.mongodb.client.gridfs.GridFSBucket; import com.mongodb.client.unified.UnifiedTest; import com.mongodb.client.vault.ClientEncryption; -import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.MongoClients; import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets; import 
com.mongodb.reactivestreams.client.internal.vault.ClientEncryptionImpl; @@ -31,18 +30,9 @@ import com.mongodb.reactivestreams.client.syncadapter.SyncGridFSBucket; import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; import com.mongodb.reactivestreams.client.syncadapter.SyncMongoDatabase; -import org.bson.BsonArray; -import org.bson.BsonDocument; public abstract class UnifiedReactiveStreamsTest extends UnifiedTest { - public UnifiedReactiveStreamsTest(@Nullable final String fileDescription, final String schemaVersion, final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - super(fileDescription, schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - } - - public UnifiedReactiveStreamsTest(final String schemaVersion, final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - this(null, schemaVersion, runOnRequirements, entitiesArray, initialData, definition); + protected UnifiedReactiveStreamsTest() { } @Override diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java index 540cb0673bb..d7f3df0f34a 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java @@ -16,24 +16,45 @@ package com.mongodb.reactivestreams.client.unified; +import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; -import org.junit.After; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static com.mongodb.client.unified.UnifiedRetryableReadsTest.customSkips; +import static com.mongodb.client.unified.UnifiedRetryableReadsTest.doSkips; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableWaitForBatchCursorCreation; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableWaitForBatchCursorCreation; -public class UnifiedRetryableReadsTest extends UnifiedReactiveStreamsTest { - public UnifiedRetryableReadsTest(final String fileDescription, final String testDescription, final String schemaVersion, - final BsonArray runOnRequirements, final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - customSkips(fileDescription, testDescription); +final class UnifiedRetryableReadsTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(fileDescription, testDescription); + } + + @Override + @BeforeEach + public void setUp( + final String fileDescription, + final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + super.setUp( + fileDescription, + testDescription, + schemaVersion, + runOnRequirements, 
+ entitiesArray, + initialData, + definition); if (fileDescription.startsWith("changeStreams") || testDescription.contains("ChangeStream")) { // Several reactive change stream tests fail if we don't block waiting for batch cursor creation. enableWaitForBatchCursorCreation(); @@ -42,14 +63,14 @@ public UnifiedRetryableReadsTest(final String fileDescription, final String test } } - @After + @Override + @AfterEach public void cleanUp() { super.cleanUp(); disableWaitForBatchCursorCreation(); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/retryable-reads"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java index 10900adbd51..182ead20c22 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java @@ -16,27 +16,21 @@ package com.mongodb.reactivestreams.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static com.mongodb.client.unified.UnifiedRetryableWritesTest.customSkips; +import static com.mongodb.client.unified.UnifiedRetryableWritesTest.doSkips; -public class UnifiedRetryableWritesTest extends UnifiedReactiveStreamsTest { - public UnifiedRetryableWritesTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - customSkips(testDescription); +final class UnifiedRetryableWritesTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(testDescription); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/retryable-writes"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java index b32137abd4a..5b12ba14de9 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java @@ -16,32 +16,19 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.Before; -import 
org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class UnifiedServerDiscoveryAndMonitoringTest extends UnifiedReactiveStreamsTest { - public UnifiedServerDiscoveryAndMonitoringTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class UnifiedServerDiscoveryAndMonitoringTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/server-discovery-and-monitoring"); } - @Before - public void before() { - com.mongodb.client.unified.UnifiedServerDiscoveryAndMonitoringTest.skipTests(getDefinition()); + @Override + protected void skips(final String fileDescription, final String testDescription) { + com.mongodb.client.unified.UnifiedServerDiscoveryAndMonitoringTest.doSkips(getDefinition()); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java index aed99bd8d70..18a92f8eee8 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java @@ -16,9 +16,7 @@ package com.mongodb.reactivestreams.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; @@ -26,14 +24,11 @@ import static com.mongodb.ClusterFixture.isSharded; import static com.mongodb.ClusterFixture.serverVersionLessThan; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedTransactionsTest extends UnifiedReactiveStreamsTest { - public UnifiedTransactionsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); +final class UnifiedTransactionsTest extends UnifiedReactiveStreamsTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(fileDescription.equals("count")); if (serverVersionLessThan(4, 4) && isSharded()) { assumeFalse(fileDescription.equals("pin-mongos") && testDescription.equals("distinct")); @@ -43,8 +38,7 @@ public UnifiedTransactionsTest(@SuppressWarnings("unused") final String fileDesc } } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, 
IOException { return getTestData("unified-test-format/transactions"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java index 7aff8e0437b..3b7fa4afb00 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java @@ -16,23 +16,14 @@ package com.mongodb.reactivestreams.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class UnifiedWriteConcernTest extends UnifiedReactiveStreamsTest { - public UnifiedWriteConcernTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, final String schemaVersion, final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class UnifiedWriteConcernTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/write-concern"); } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java index b93ac19afd0..5a0d4b69dcd 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java @@ -16,25 +16,14 @@ package com.mongodb.reactivestreams.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class VersionedApiTest extends UnifiedReactiveStreamsTest { - public VersionedApiTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class VersionedApiTest extends UnifiedReactiveStreamsTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("versioned-api"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java index 
ac8bda66f84..d07429fa479 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java @@ -16,25 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public final class ChangeStreamsTest extends UnifiedSyncTest { - public ChangeStreamsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class ChangeStreamsTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/change-streams"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java index 7840335f056..bbb232386dd 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java @@ -16,27 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class ClientSideEncryptionTest extends UnifiedSyncTest { - public ClientSideEncryptionTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class ClientSideEncryptionTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/client-side-encryption"); } } - diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java index 672a09c5546..cbc371c1913 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java @@ -16,28 +16,21 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import 
java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class CollectionManagementTest extends UnifiedSyncTest { - public CollectionManagementTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class CollectionManagementTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(testDescription.equals("modifyCollection to changeStreamPreAndPostImages enabled")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/collection-management"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java index 7baf06c7c96..02fbae2fd40 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java @@ -16,35 +16,25 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; import static com.mongodb.ClusterFixture.isServerlessTest; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class CommandLoggingTest extends UnifiedSyncTest { - - - public CommandLoggingTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class CommandLoggingTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(isServerlessTest()); // The driver has a hack where getLastError command is executed as part of the handshake in order to get a connectionId // even when the hello command response doesn't contain it. 
assumeFalse(fileDescription.equals("pre-42-server-connection-id")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/command-logging"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java index 6a02669ac22..345639fba60 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java @@ -16,35 +16,25 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; import static com.mongodb.ClusterFixture.isServerlessTest; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class CommandMonitoringTest extends UnifiedSyncTest { - - - public CommandMonitoringTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class CommandMonitoringTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(isServerlessTest()); // The driver has a hack where getLastError command is executed as part of the handshake in order to get a connectionId // even when the hello command response doesn't contain it. 
assumeFalse(fileDescription.equals("pre-42-server-connection-id")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/command-monitoring"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java index 62e81674653..8d34eb0a1fa 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java @@ -16,32 +16,23 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class ConnectionPoolLoggingTest extends UnifiedSyncTest { - - public ConnectionPoolLoggingTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class ConnectionPoolLoggingTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { // The implementation of the functionality related to clearing the connection pool before closing the connection // will be carried out once the specification is finalized and ready. 
assumeFalse(testDescription.equals("Connection checkout fails due to error establishing connection")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/connection-monitoring-and-pooling/logging"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagmentTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagmentTest.java index 3fdb04d3116..382c5edb3a4 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagmentTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagmentTest.java @@ -16,27 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class IndexManagmentTest extends UnifiedSyncTest { - - public IndexManagmentTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class IndexManagmentTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/index-management"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java index 891297bd364..eb70f5da4cf 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java @@ -16,26 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class LoadBalancerTest extends UnifiedSyncTest { - - public LoadBalancerTest(final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(fileDescription, schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class LoadBalancerTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/load-balancers"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java 
b/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java index 4ce99fb76a8..2e932ba975f 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java @@ -16,25 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public final class ServerSelectionLoggingTest extends UnifiedSyncTest { - public ServerSelectionLoggingTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class ServerSelectionLoggingTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/server-selection/logging"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java index 3a607407138..33d851a38c9 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java @@ -16,25 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class SessionsTest extends UnifiedSyncTest { - - public SessionsTest(@SuppressWarnings("unused") final String fileDescription, @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class SessionsTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/sessions"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAtlasDataLakeTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAtlasDataLakeTest.java index 9bc43e5f25d..7b3183f0fee 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAtlasDataLakeTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAtlasDataLakeTest.java @@ -16,30 +16,22 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import 
org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; import static com.mongodb.ClusterFixture.isDataLakeTest; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; -public class UnifiedAtlasDataLakeTest extends UnifiedSyncTest { - - public UnifiedAtlasDataLakeTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, final String schemaVersion, - @Nullable final BsonArray runOnRequirements, final BsonArray entities, final BsonArray initialData, - final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class UnifiedAtlasDataLakeTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeTrue(isDataLakeTest()); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/atlas-data-lake-testing"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java index f94977f2546..0471a9600c6 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java @@ -16,24 +16,14 @@ package com.mongodb.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class UnifiedAuthTest extends UnifiedSyncTest { - public UnifiedAuthTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class UnifiedAuthTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/auth"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java index 410c6b9e0e9..5c494452823 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java @@ -16,10 +16,7 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; @@ -27,19 +24,10 @@ import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.serverVersionAtLeast; -import static org.junit.Assume.assumeFalse; 
+import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedCrudTest extends UnifiedSyncTest { - - public UnifiedCrudTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - customSkips(fileDescription, testDescription); - } - - public static void customSkips(final String fileDescription, final String testDescription) { +public final class UnifiedCrudTest extends UnifiedSyncTest { + public static void doSkips(final String fileDescription, final String testDescription) { assumeFalse(testDescription.equals("Deprecated count with empty collection")); assumeFalse(testDescription.equals("Deprecated count with collation")); assumeFalse(testDescription.equals("Deprecated count without a filter")); @@ -57,7 +45,12 @@ public static void customSkips(final String fileDescription, final String testDe } } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(fileDescription, testDescription); + } + + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/crud"); - }} + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java index 5f0dfb36bb9..0413e2c0c0c 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java @@ -16,28 +16,22 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedGridFSTest extends UnifiedSyncTest { - - public UnifiedGridFSTest(@SuppressWarnings("unused") final String fileDescription, final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); +final class UnifiedGridFSTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { // contentType is deprecated in GridFS spec, and 4.x Java driver no longer support it, so skipping this test assumeFalse(testDescription.equals("upload when contentType is provided")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/gridfs"); - }} + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java 
b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java index 4d50fd54577..62712eaab0e 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java @@ -16,24 +16,21 @@ package com.mongodb.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedRetryableReadsTest extends UnifiedSyncTest { - public UnifiedRetryableReadsTest(final String fileDescription, final String testDescription, final String schemaVersion, - final BsonArray runOnRequirements, final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - customSkips(fileDescription, testDescription); +public final class UnifiedRetryableReadsTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(fileDescription, testDescription); } - public static void customSkips(final String fileDescription, @SuppressWarnings("unused") final String testDescription) { + public static void doSkips(final String fileDescription, @SuppressWarnings("unused") final String testDescription) { // Skipped because driver removed the deprecated count methods assumeFalse(fileDescription.equals("count")); assumeFalse(fileDescription.equals("count-serverErrors")); @@ -44,8 +41,7 @@ public static void customSkips(final String fileDescription, @SuppressWarnings(" assumeFalse(fileDescription.equals("listCollectionObjects-serverErrors")); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/retryable-reads"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java index 210295a30e6..794a027ebaf 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java @@ -16,9 +16,7 @@ package com.mongodb.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; @@ -27,18 +25,15 @@ import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isSharded; import static com.mongodb.ClusterFixture.serverVersionLessThan; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedRetryableWritesTest extends UnifiedSyncTest { - public UnifiedRetryableWritesTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final 
BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - customSkips(testDescription); +public final class UnifiedRetryableWritesTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(testDescription); } - public static void customSkips(final String description) { + public static void doSkips(final String description) { if (isSharded() && serverVersionLessThan(5, 0)) { assumeFalse(description.contains("succeeds after WriteConcernError")); assumeFalse(description.contains("succeeds after retryable writeConcernError")); @@ -48,8 +43,7 @@ public static void customSkips(final String description) { } } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/retryable-writes"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java index 7f2b4bce607..c384a50967e 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java @@ -16,43 +16,31 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonString; -import org.junit.Before; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedServerDiscoveryAndMonitoringTest extends UnifiedSyncTest { - - public UnifiedServerDiscoveryAndMonitoringTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +public final class UnifiedServerDiscoveryAndMonitoringTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/server-discovery-and-monitoring"); } - @Before - public void before() { - skipTests(getDefinition()); + @Override + protected void skips(final String fileDescription, final String testDescription) { + doSkips(getDefinition()); } - public static void skipTests(final BsonDocument definition) { + public static void doSkips(final BsonDocument definition) { String description = definition.getString("description", new BsonString("")).getValue(); - assumeFalse("Skipping because our server monitoring events behave differently for now", - description.equals("connect with serverMonitoringMode=auto >=4.4")); - assumeFalse("Skipping because our server monitoring events behave differently 
for now", - description.equals("connect with serverMonitoringMode=stream >=4.4")); + assumeFalse(description.equals("connect with serverMonitoringMode=auto >=4.4"), + "Skipping because our server monitoring events behave differently for now"); + assumeFalse(description.equals("connect with serverMonitoringMode=stream >=4.4"), + "Skipping because our server monitoring events behave differently for now"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java index 5e7a0e4ddc4..37db7cfe907 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java @@ -25,19 +25,9 @@ import com.mongodb.client.gridfs.GridFSBuckets; import com.mongodb.client.internal.ClientEncryptionImpl; import com.mongodb.client.vault.ClientEncryption; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; public abstract class UnifiedSyncTest extends UnifiedTest { - public UnifiedSyncTest(@Nullable final String fileDescription, final String schemaVersion, final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - super(fileDescription, schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - } - - public UnifiedSyncTest(final String schemaVersion, final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - this(null, schemaVersion, runOnRequirements, entitiesArray, initialData, definition); + protected UnifiedSyncTest() { } @Override diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java index 46e47757ff6..e88abd6669f 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java @@ -41,6 +41,7 @@ import com.mongodb.internal.connection.TestConnectionPoolListener; import com.mongodb.lang.NonNull; import com.mongodb.lang.Nullable; +import com.mongodb.test.AfterBeforeParameterResolver; import org.bson.BsonArray; import org.bson.BsonBoolean; import org.bson.BsonDocument; @@ -49,12 +50,13 @@ import org.bson.BsonString; import org.bson.BsonValue; import org.bson.codecs.BsonDocumentCodec; -import org.junit.After; -import org.junit.AssumptionViolatedException; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.opentest4j.TestAbortedException; import java.io.File; import java.io.IOException; @@ -80,35 +82,36 @@ import static com.mongodb.client.unified.RunOnRequirementsMatcher.runOnRequirementsMet; import static java.util.Collections.singletonList; import static java.util.stream.Collectors.toList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static 
org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; import static util.JsonPoweredTestHelper.getTestDocument; import static util.JsonPoweredTestHelper.getTestFiles; -@RunWith(Parameterized.class) +@ExtendWith(AfterBeforeParameterResolver.class) public abstract class UnifiedTest { private static final Set PRESTART_POOL_ASYNC_WORK_MANAGER_FILE_DESCRIPTIONS = Collections.singleton( "wait queue timeout errors include details about checked out connections"); @Nullable - private final String fileDescription; - private final String schemaVersion; - private final BsonArray runOnRequirements; - private final BsonArray entitiesArray; - private final BsonArray initialData; - private final BsonDocument definition; - private final Entities entities = new Entities(); - private final UnifiedCrudHelper crudHelper; - private final UnifiedGridFSHelper gridFSHelper = new UnifiedGridFSHelper(entities); - private final UnifiedClientEncryptionHelper clientEncryptionHelper = new UnifiedClientEncryptionHelper(entities); - private final List failPoints = new ArrayList<>(); - private final UnifiedTestContext rootContext = new UnifiedTestContext(); + private String fileDescription; + private String schemaVersion; + @Nullable + private BsonArray runOnRequirements; + private BsonArray entitiesArray; + private BsonArray initialData; + private BsonDocument definition; + private Entities entities; + private UnifiedCrudHelper crudHelper; + private UnifiedGridFSHelper gridFSHelper; + private UnifiedClientEncryptionHelper clientEncryptionHelper; + private List failPoints; + private UnifiedTestContext rootContext; private boolean ignoreExtraEvents; private BsonDocument startingClusterTime; @@ -140,16 +143,7 @@ LogMatcher getLogMatcher() { } } - public UnifiedTest(@Nullable final String fileDescription, final String schemaVersion, @Nullable final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final BsonDocument definition) { - this.fileDescription = fileDescription; - this.schemaVersion = schemaVersion; - this.runOnRequirements = runOnRequirements; - this.entitiesArray = entitiesArray; - this.initialData = initialData; - this.definition = definition; - this.rootContext.getAssertionContext().push(ContextElement.ofTest(definition)); - crudHelper = new UnifiedCrudHelper(entities, definition.getString("description").getValue()); + protected UnifiedTest() { } protected void ignoreExtraEvents() { @@ -161,8 +155,8 @@ public Entities getEntities() { } @NonNull - protected static Collection getTestData(final String directory) throws URISyntaxException, IOException { - List data = new ArrayList<>(); + protected static Collection getTestData(final String directory) throws URISyntaxException, IOException { + List data = new ArrayList<>(); for (File file : getTestFiles("/" + directory + "/")) { BsonDocument fileDocument = getTestDocument(file); @@ -174,15 +168,15 @@ protected static Collection getTestData(final String 
directory) throws } @NonNull - private static Object[] createTestData(final BsonDocument fileDocument, final BsonDocument testDocument) { - return new Object[]{ + private static Arguments createTestData(final BsonDocument fileDocument, final BsonDocument testDocument) { + return Arguments.of( fileDocument.getString("description").getValue(), testDocument.getString("description").getValue(), fileDocument.getString("schemaVersion").getValue(), fileDocument.getArray("runOnRequirements", null), fileDocument.getArray("createEntities", new BsonArray()), fileDocument.getArray("initialData", new BsonArray()), - testDocument}; + testDocument); } protected BsonDocument getDefinition() { @@ -195,9 +189,31 @@ protected BsonDocument getDefinition() { protected abstract ClientEncryption createClientEncryption(MongoClient keyVaultClient, ClientEncryptionSettings clientEncryptionSettings); - @Before - public void setUp() { - assertTrue(String.format("Unsupported schema version %s", schemaVersion), + @BeforeEach + public void setUp( + @Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + this.fileDescription = fileDescription; + this.schemaVersion = schemaVersion; + this.runOnRequirements = runOnRequirements; + this.entitiesArray = entitiesArray; + this.initialData = initialData; + this.definition = definition; + entities = new Entities(); + crudHelper = new UnifiedCrudHelper(entities, definition.getString("description").getValue()); + gridFSHelper = new UnifiedGridFSHelper(entities); + clientEncryptionHelper = new UnifiedClientEncryptionHelper(entities); + failPoints = new ArrayList<>(); + rootContext = new UnifiedTestContext(); + rootContext.getAssertionContext().push(ContextElement.ofTest(definition)); + ignoreExtraEvents = false; + skips(fileDescription, testDescription); + assertTrue( schemaVersion.equals("1.0") || schemaVersion.equals("1.1") || schemaVersion.equals("1.2") @@ -217,18 +233,19 @@ public void setUp() { || schemaVersion.equals("1.16") || schemaVersion.equals("1.17") || schemaVersion.equals("1.18") - || schemaVersion.equals("1.19")); + || schemaVersion.equals("1.19"), + String.format("Unsupported schema version %s", schemaVersion)); if (runOnRequirements != null) { - assumeTrue("Run-on requirements not met", - runOnRequirementsMet(runOnRequirements, getMongoClientSettings(), getServerVersion())); + assumeTrue(runOnRequirementsMet(runOnRequirements, getMongoClientSettings(), getServerVersion()), + "Run-on requirements not met"); } if (definition.containsKey("runOnRequirements")) { - assumeTrue("Run-on requirements not met", - runOnRequirementsMet(definition.getArray("runOnRequirements", new BsonArray()), getMongoClientSettings(), - getServerVersion())); + assumeTrue(runOnRequirementsMet(definition.getArray("runOnRequirements", new BsonArray()), getMongoClientSettings(), + getServerVersion()), + "Run-on requirements not met"); } if (definition.containsKey("skipReason")) { - throw new AssumptionViolatedException(definition.getString("skipReason").getValue()); + throw new TestAbortedException(definition.getString("skipReason").getValue()); } if (!isDataLakeTest()) { @@ -244,7 +261,7 @@ public void setUp() { this::createClientEncryption); } - @After + @AfterEach public void cleanUp() { for (FailPoint failPoint : failPoints) { failPoint.disableFailPoint(); @@ -252,8 +269,23 @@ public void 
cleanUp() { entities.close(); } - @Test - public void shouldPassAllOutcomes() { + /** + * This method is called once per {@link #setUp(String, String, String, BsonArray, BsonArray, BsonArray, BsonDocument)}, + * unless {@link #setUp(String, String, String, BsonArray, BsonArray, BsonArray, BsonDocument)} fails unexpectedly. + */ + protected void skips(final String fileDescription, final String testDescription) { + } + + @ParameterizedTest(name = "{0}: {1}") + @MethodSource("data") + public void shouldPassAllOutcomes( + @Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { BsonArray operations = definition.getArray("operations"); for (int i = 0; i < operations.size(); i++) { BsonValue cur = operations.get(i); @@ -326,7 +358,7 @@ private void assertOutcome(final UnifiedTestContext context) { List expectedOutcome = curDocument.getArray("documents").stream().map(BsonValue::asDocument).collect(toList()); List actualOutcome = new CollectionHelper<>(new BsonDocumentCodec(), namespace).find(); context.getAssertionContext().push(ContextElement.ofOutcome(namespace, expectedOutcome, actualOutcome)); - assertEquals(context.getAssertionContext().getMessage("Outcomes are not equal"), expectedOutcome, actualOutcome); + assertEquals(expectedOutcome, actualOutcome, context.getAssertionContext().getMessage("Outcomes are not equal")); context.getAssertionContext().pop(); } } @@ -350,16 +382,16 @@ private static void assertOperationResult(final UnifiedTestContext context, fina context.getAssertionContext().push(ContextElement.ofCompletedOperation(operation, result, operationIndex)); if (!operation.getBoolean("ignoreResultAndError", BsonBoolean.FALSE).getValue()) { if (operation.containsKey("expectResult")) { - assertNull(context.getAssertionContext().getMessage("The operation expects a result but an exception occurred"), - result.getException()); + assertNull(result.getException(), + context.getAssertionContext().getMessage("The operation expects a result but an exception occurred")); context.getValueMatcher().assertValuesMatch(operation.get("expectResult"), result.getResult()); } else if (operation.containsKey("expectError")) { - assertNotNull(context.getAssertionContext().getMessage("The operation expects an error but no exception was thrown"), - result.getException()); + assertNotNull(result.getException(), + context.getAssertionContext().getMessage("The operation expects an error but no exception was thrown")); context.getErrorMatcher().assertErrorsMatch(operation.getDocument("expectError"), result.getException()); } else { - assertNull(context.getAssertionContext().getMessage("The operation expects no error but an exception occurred"), - result.getException()); + assertNull(result.getException(), + context.getAssertionContext().getMessage("The operation expects no error but an exception occurred")); } } context.getAssertionContext().pop(); @@ -776,8 +808,8 @@ private OperationResult executeAssertTopologyType(final UnifiedTestContext conte context.getAssertionContext().push(ContextElement.ofTopologyType(expectedTopologyType)); - assertEquals(context.getAssertionContext().getMessage("Unexpected topology type"), getClusterType(expectedTopologyType), - clusterDescription.getType()); + assertEquals(getClusterType(expectedTopologyType), clusterDescription.getType(), + 
context.getAssertionContext().getMessage("Unexpected topology type")); context.getAssertionContext().pop(); return OperationResult.NONE; @@ -860,8 +892,8 @@ private OperationResult executeAssertSessionPinniness(final BsonDocument operati private OperationResult executeAssertNumberConnectionsCheckedOut(final UnifiedTestContext context, final BsonDocument operation) { TestConnectionPoolListener listener = entities.getConnectionPoolListener( operation.getDocument("arguments").getString("client").getValue()); - assertEquals(context.getAssertionContext().getMessage("Number of checked out connections must match expected"), - operation.getDocument("arguments").getNumber("connections").intValue(), listener.getNumConnectionsCheckedOut()); + assertEquals(operation.getDocument("arguments").getNumber("connections").intValue(), listener.getNumConnectionsCheckedOut(), + context.getAssertionContext().getMessage("Number of checked out connections must match expected")); return OperationResult.NONE; } @@ -883,9 +915,9 @@ private OperationResult executeAssertLsidOnLastTwoCommands(final BsonDocument op BsonDocument expected = ((CommandStartedEvent) events.get(0)).getCommand().getDocument("lsid"); BsonDocument actual = ((CommandStartedEvent) events.get(1)).getCommand().getDocument("lsid"); if (same) { - assertEquals(eventsJson, expected, actual); + assertEquals(expected, actual, eventsJson); } else { - assertNotEquals(eventsJson, expected, actual); + assertNotEquals(expected, actual, eventsJson); } return OperationResult.NONE; } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java index 0e397b7af38..2694ee8066e 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java @@ -19,49 +19,73 @@ import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; -import org.junit.Before; -import org.junit.Test; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotNull; -public class UnifiedTestFailureValidator extends UnifiedSyncTest { +final class UnifiedTestFailureValidator extends UnifiedSyncTest { private Throwable exception; - public UnifiedTestFailureValidator(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Before - public void setUp() { + @Override + @BeforeEach + public void setUp( + @Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { try { - super.setUp(); + super.setUp( + fileDescription, + 
testDescription, + schemaVersion, + runOnRequirements, + entitiesArray, + initialData, + definition); } catch (AssertionError | Exception e) { exception = e; } } - @Test - public void shouldPassAllOutcomes() { + @Override + @ParameterizedTest + @MethodSource("data") + public void shouldPassAllOutcomes( + @Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { if (exception == null) { try { - super.shouldPassAllOutcomes(); + super.shouldPassAllOutcomes( + fileDescription, + testDescription, + schemaVersion, + runOnRequirements, + entitiesArray, + initialData, + definition); } catch (AssertionError | Exception e) { exception = e; } } - assertNotNull("Expected exception but not was thrown", exception); + assertNotNull(exception, "Expected exception but not was thrown"); } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/valid-fail"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java index eaf9ebe3395..ecb04294bf8 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java @@ -16,37 +16,24 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.Before; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; import static com.mongodb.ClusterFixture.serverVersionLessThan; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedTestValidator extends UnifiedSyncTest { - public UnifiedTestValidator(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - assumeFalse("MongoDB releases prior to 4.4 incorrectly add errorLabels as a field within the writeConcernError document " - + "instead of as a top-level field. Rather than handle that in code, we skip the test on older server versions.", - testDescription.equals("InsertOne fails after multiple retryable writeConcernErrors") && serverVersionLessThan(4, 4)); +final class UnifiedTestValidator extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { + assumeFalse(testDescription.equals("InsertOne fails after multiple retryable writeConcernErrors") && serverVersionLessThan(4, 4), + "MongoDB releases prior to 4.4 incorrectly add errorLabels as a field within the writeConcernError document " + + "instead of as a top-level field. 
Rather than handle that in code, we skip the test on older server versions."); } - @Before - public void setUp() { - super.setUp(); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/valid-pass"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java index 270c52600d1..5acf74cd972 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java @@ -16,9 +16,7 @@ package com.mongodb.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; @@ -26,14 +24,11 @@ import static com.mongodb.ClusterFixture.isSharded; import static com.mongodb.ClusterFixture.serverVersionLessThan; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeFalse; -public class UnifiedTransactionsTest extends UnifiedSyncTest { - public UnifiedTransactionsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); +final class UnifiedTransactionsTest extends UnifiedSyncTest { + @Override + protected void skips(final String fileDescription, final String testDescription) { assumeFalse(fileDescription.equals("count")); if (serverVersionLessThan(4, 4) && isSharded()) { assumeFalse(fileDescription.equals("pin-mongos") && testDescription.equals("distinct")); @@ -43,8 +38,7 @@ public UnifiedTransactionsTest(@SuppressWarnings("unused") final String fileDesc } } - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/transactions"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java index 77da717f086..4d1a5a2f854 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java @@ -16,23 +16,14 @@ package com.mongodb.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class UnifiedWriteConcernTest extends UnifiedSyncTest { - public UnifiedWriteConcernTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, final String schemaVersion, final BsonArray runOnRequirements, - final BsonArray entitiesArray, final BsonArray initialData, final 
BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class UnifiedWriteConcernTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/write-concern"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java index 81a20662f38..e9ccd4d1cd4 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java @@ -16,26 +16,14 @@ package com.mongodb.client.unified; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class VersionedApiTest extends UnifiedSyncTest { - - public VersionedApiTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, @Nullable final BsonArray runOnRequirements, final BsonArray entities, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entities, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class VersionedApiTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("versioned-api"); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java index dff641068e4..d9fb4c9b4df 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java @@ -16,24 +16,14 @@ package com.mongodb.client.unified; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.provider.Arguments; import java.io.IOException; import java.net.URISyntaxException; import java.util.Collection; -public class WithTransactionHelperTransactionsTest extends UnifiedSyncTest { - public WithTransactionHelperTransactionsTest(@SuppressWarnings("unused") final String fileDescription, - @SuppressWarnings("unused") final String testDescription, - final String schemaVersion, final BsonArray runOnRequirements, final BsonArray entitiesArray, - final BsonArray initialData, final BsonDocument definition) { - super(schemaVersion, runOnRequirements, entitiesArray, initialData, definition); - } - - @Parameterized.Parameters(name = "{0}: {1}") - public static Collection data() throws URISyntaxException, IOException { +final class WithTransactionHelperTransactionsTest extends UnifiedSyncTest { + private static Collection data() throws URISyntaxException, IOException { return getTestData("unified-test-format/transactions-convenient-api"); } } diff --git 
a/driver-workload-executor/src/main/com/mongodb/workload/WorkloadExecutor.java b/driver-workload-executor/src/main/com/mongodb/workload/WorkloadExecutor.java index 1f0e17cfed2..88248e13ca3 100644 --- a/driver-workload-executor/src/main/com/mongodb/workload/WorkloadExecutor.java +++ b/driver-workload-executor/src/main/com/mongodb/workload/WorkloadExecutor.java @@ -85,12 +85,7 @@ public static void main(String[] args) throws IOException { } BsonDocument testDocument = testArray.get(0).asDocument(); - UnifiedTest unifiedTest = new UnifiedSyncTest(fileDocument.getString("schemaVersion").getValue(), - fileDocument.getArray("runOnRequirements", null), - fileDocument.getArray("createEntities", new BsonArray()), - fileDocument.getArray("initialData", new BsonArray()), - testDocument) { - + UnifiedTest unifiedTest = new UnifiedSyncTest() { @Override protected boolean terminateLoop() { return terminateLoop; @@ -98,8 +93,25 @@ protected boolean terminateLoop() { }; try { - unifiedTest.setUp(); - unifiedTest.shouldPassAllOutcomes(); + String schemaVersion = fileDocument.getString("schemaVersion").getValue(); + BsonArray runOnRequirements = fileDocument.getArray("runOnRequirements", null); + BsonArray createEntities = fileDocument.getArray("createEntities", new BsonArray()); + BsonArray initialData = fileDocument.getArray("initialData", new BsonArray()); + unifiedTest.setUp(null, + null, + schemaVersion, + runOnRequirements, + createEntities, + initialData, + testDocument); + unifiedTest.shouldPassAllOutcomes( + null, + null, + schemaVersion, + runOnRequirements, + createEntities, + initialData, + testDocument); Entities entities = unifiedTest.getEntities(); long iterationCount = -1; From 099ec1eae36b257896e6601b1c8de65192fbdeb6 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Fri, 12 Jul 2024 10:55:50 -0600 Subject: [PATCH 42/90] Fix `CursorResourceManager.close` (#1440) JAVA-5516 --- .../operation/CursorResourceManager.java | 2 +- .../operation/CursorResourceManagerTest.java | 59 +++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java index cb2e5c58e84..7aeaad49118 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java @@ -182,7 +182,7 @@ void endOperation() { void close() { boolean doClose = withLock(lock, () -> { State localState = state; - if (localState == State.OPERATION_IN_PROGRESS) { + if (localState.inProgress()) { state = State.CLOSE_PENDING; } else if (localState != State.CLOSED) { state = State.CLOSED; diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java new file mode 100644 index 00000000000..15a8bd972f1 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.ServerCursor; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.mockito.MongoMockito; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.mockito.Mockito.when; + +final class CursorResourceManagerTest { + @Test + void doubleCloseExecutedConcurrentlyWithOperationBeingInProgressShouldNotFail() { + CursorResourceManager cursorResourceManager = new CursorResourceManager( + new MongoNamespace("db", "coll"), + MongoMockito.mock(AsyncConnectionSource.class, mock -> { + when(mock.retain()).thenReturn(mock); + when(mock.release()).thenReturn(1); + }), + null, + MongoMockito.mock(ServerCursor.class)) { + @Override + void markAsPinned(final ReferenceCounted connectionToPin, final Connection.PinningMode pinningMode) { + } + + @Override + void doClose() { + } + }; + cursorResourceManager.tryStartOperation(); + try { + assertDoesNotThrow(() -> { + cursorResourceManager.close(); + cursorResourceManager.close(); + cursorResourceManager.setServerCursor(null); + }); + } finally { + cursorResourceManager.endOperation(); + } + } +} From 18a6c9c0ad108d5818770ec2a1fdb8cb30a36a41 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Tue, 16 Jul 2024 08:04:23 -0700 Subject: [PATCH 43/90] Enhance KotlinSerializer with value codecs for widening primitive conversion. 
(#1301) JAVA-5303 --- .../org/bson/codecs/kotlinx/BsonDecoder.kt | 21 +++--- .../kotlinx/KotlinSerializerCodecTest.kt | 69 ++++++++++++++++--- .../org/bson/codecs/AtomicIntegerCodec.java | 2 +- .../main/org/bson/codecs/AtomicLongCodec.java | 2 +- bson/src/main/org/bson/codecs/ByteCodec.java | 10 +-- .../main/org/bson/codecs/CharacterCodec.java | 11 +-- .../src/main/org/bson/codecs/DoubleCodec.java | 2 +- bson/src/main/org/bson/codecs/FloatCodec.java | 10 +-- .../main/org/bson/codecs/IntegerCodec.java | 2 +- bson/src/main/org/bson/codecs/LongCodec.java | 2 +- bson/src/main/org/bson/codecs/ShortCodec.java | 10 +-- .../NumberCodecHelper.java | 37 ++++++++-- .../org/bson/internal/StringCodecHelper.java | 46 +++++++++++++ config/spotbugs/exclude.xml | 6 ++ 14 files changed, 170 insertions(+), 60 deletions(-) rename bson/src/main/org/bson/{codecs => internal}/NumberCodecHelper.java (78%) create mode 100644 bson/src/main/org/bson/internal/StringCodecHelper.java diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt index 435964d4ac0..38d9c23309f 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt @@ -36,6 +36,8 @@ import org.bson.BsonType import org.bson.BsonValue import org.bson.codecs.BsonValueCodec import org.bson.codecs.DecoderContext +import org.bson.internal.NumberCodecHelper +import org.bson.internal.StringCodecHelper import org.bson.types.ObjectId /** @@ -154,14 +156,17 @@ internal open class DefaultBsonDecoder( } } - override fun decodeByte(): Byte = decodeInt().toByte() - override fun decodeChar(): Char = decodeString().single() - override fun decodeFloat(): Float = decodeDouble().toFloat() - override fun decodeShort(): Short = decodeInt().toShort() - override fun decodeBoolean(): Boolean = readOrThrow({ reader.readBoolean() }, BsonType.BOOLEAN) - override fun decodeDouble(): Double = readOrThrow({ reader.readDouble() }, BsonType.DOUBLE) - override fun decodeInt(): Int = readOrThrow({ reader.readInt32() }, BsonType.INT32) - override fun decodeLong(): Long = readOrThrow({ reader.readInt64() }, BsonType.INT64) + override fun decodeByte(): Byte = NumberCodecHelper.decodeByte(reader) + + override fun decodeChar(): Char = StringCodecHelper.decodeChar(reader) + override fun decodeFloat(): Float = NumberCodecHelper.decodeFloat(reader) + + override fun decodeShort(): Short = NumberCodecHelper.decodeShort(reader) + override fun decodeBoolean(): Boolean = reader.readBoolean() + + override fun decodeDouble(): Double = NumberCodecHelper.decodeDouble(reader) + override fun decodeInt(): Int = NumberCodecHelper.decodeInt(reader) + override fun decodeLong(): Long = NumberCodecHelper.decodeLong(reader) override fun decodeString(): String = readOrThrow({ reader.readString() }, BsonType.STRING) override fun decodeNull(): Nothing? 
{ diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt index 0aed60b27ba..30fc6f7fbb4 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt @@ -15,6 +15,7 @@ */ package org.bson.codecs.kotlinx +import java.util.stream.Stream import kotlin.test.assertEquals import kotlinx.serialization.ExperimentalSerializationApi import kotlinx.serialization.MissingFieldException @@ -23,12 +24,17 @@ import kotlinx.serialization.modules.SerializersModule import kotlinx.serialization.modules.plus import kotlinx.serialization.modules.polymorphic import kotlinx.serialization.modules.subclass +import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDocumentReader import org.bson.BsonDocumentWriter +import org.bson.BsonDouble +import org.bson.BsonInt32 +import org.bson.BsonInt64 import org.bson.BsonInvalidOperationException import org.bson.BsonMaxKey import org.bson.BsonMinKey +import org.bson.BsonString import org.bson.BsonUndefined import org.bson.codecs.DecoderContext import org.bson.codecs.EncoderContext @@ -90,11 +96,12 @@ import org.bson.codecs.kotlinx.samples.SealedInterface import org.bson.codecs.kotlinx.samples.ValueClass import org.junit.jupiter.api.Test import org.junit.jupiter.api.assertThrows +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource @OptIn(ExperimentalSerializationApi::class) @Suppress("LargeClass") class KotlinSerializerCodecTest { - private val numberLong = "\$numberLong" private val oid = "\$oid" private val emptyDocument = "{}" private val altConfiguration = @@ -134,15 +141,59 @@ class KotlinSerializerCodecTest { private val allBsonTypesDocument = BsonDocument.parse(allBsonTypesJson) - @Test - fun testDataClassWithSimpleValues() { - val expected = - """{"char": "c", "byte": 0, "short": 1, "int": 22, "long": {"$numberLong": "42"}, "float": 4.0, - | "double": 4.2, "boolean": true, "string": "String"}""" - .trimMargin() - val dataClass = DataClassWithSimpleValues('c', 0, 1, 22, 42L, 4.0f, 4.2, true, "String") + companion object { + @JvmStatic + fun testTypesCastingDataClassWithSimpleValues(): Stream { + return Stream.of( + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonInt32(1)) + .append("short", BsonInt32(2)) + .append("int", BsonInt32(10)) + .append("long", BsonInt32(10)) + .append("float", BsonInt32(2)) + .append("double", BsonInt32(3)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String")), + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonDouble(1.0)) + .append("short", BsonDouble(2.0)) + .append("int", BsonDouble(9.9999999999999992)) + .append("long", BsonDouble(9.9999999999999992)) + .append("float", BsonDouble(2.0)) + .append("double", BsonDouble(3.0)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String")), + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonDouble(1.0)) + .append("short", BsonDouble(2.0)) + .append("int", BsonDouble(10.0)) + .append("long", BsonDouble(10.0)) + .append("float", BsonDouble(2.0)) + .append("double", BsonDouble(3.0)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String")), + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonInt64(1)) + .append("short", 
BsonInt64(2)) + .append("int", BsonInt64(10)) + .append("long", BsonInt64(10)) + .append("float", BsonInt64(2)) + .append("double", BsonInt64(3)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String"))) + } + } - assertRoundTrips(expected, dataClass) + @ParameterizedTest + @MethodSource("testTypesCastingDataClassWithSimpleValues") + fun testTypesCastingDataClassWithSimpleValues(data: BsonDocument) { + val expectedDataClass = DataClassWithSimpleValues('c', 1, 2, 10, 10L, 2.0f, 3.0, true, "String") + + assertDecodesTo(data, expectedDataClass) } @Test diff --git a/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java b/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java index 8fd3e55876b..d8963ed40d7 100644 --- a/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java +++ b/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java @@ -21,7 +21,7 @@ import java.util.concurrent.atomic.AtomicInteger; -import static org.bson.codecs.NumberCodecHelper.decodeInt; +import static org.bson.internal.NumberCodecHelper.decodeInt; /** * Encodes and decodes {@code AtomicInteger} objects. diff --git a/bson/src/main/org/bson/codecs/AtomicLongCodec.java b/bson/src/main/org/bson/codecs/AtomicLongCodec.java index c6e053c6d9f..7f08af77961 100644 --- a/bson/src/main/org/bson/codecs/AtomicLongCodec.java +++ b/bson/src/main/org/bson/codecs/AtomicLongCodec.java @@ -21,7 +21,7 @@ import java.util.concurrent.atomic.AtomicLong; -import static org.bson.codecs.NumberCodecHelper.decodeLong; +import static org.bson.internal.NumberCodecHelper.decodeLong; /** * Encodes and decodes {@code AtomicLong} objects. diff --git a/bson/src/main/org/bson/codecs/ByteCodec.java b/bson/src/main/org/bson/codecs/ByteCodec.java index 26b5005ea66..e7011f8b58d 100644 --- a/bson/src/main/org/bson/codecs/ByteCodec.java +++ b/bson/src/main/org/bson/codecs/ByteCodec.java @@ -16,12 +16,10 @@ package org.bson.codecs; -import org.bson.BsonInvalidOperationException; import org.bson.BsonReader; import org.bson.BsonWriter; -import static java.lang.String.format; -import static org.bson.codecs.NumberCodecHelper.decodeInt; +import static org.bson.internal.NumberCodecHelper.decodeByte; /** * Encodes and decodes {@code Byte} objects. 
@@ -37,11 +35,7 @@ public void encode(final BsonWriter writer, final Byte value, final EncoderConte @Override public Byte decode(final BsonReader reader, final DecoderContext decoderContext) { - int value = decodeInt(reader); - if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { - throw new BsonInvalidOperationException(format("%s can not be converted into a Byte.", value)); - } - return (byte) value; + return decodeByte(reader); } @Override diff --git a/bson/src/main/org/bson/codecs/CharacterCodec.java b/bson/src/main/org/bson/codecs/CharacterCodec.java index 0a9e6252056..4ad6efa2663 100644 --- a/bson/src/main/org/bson/codecs/CharacterCodec.java +++ b/bson/src/main/org/bson/codecs/CharacterCodec.java @@ -16,11 +16,10 @@ package org.bson.codecs; -import org.bson.BsonInvalidOperationException; import org.bson.BsonReader; import org.bson.BsonWriter; +import org.bson.internal.StringCodecHelper; -import static java.lang.String.format; import static org.bson.assertions.Assertions.notNull; /** @@ -38,13 +37,7 @@ public void encode(final BsonWriter writer, final Character value, final Encoder @Override public Character decode(final BsonReader reader, final DecoderContext decoderContext) { - String string = reader.readString(); - if (string.length() != 1) { - throw new BsonInvalidOperationException(format("Attempting to decode the string '%s' to a character, but its length is not " - + "equal to one", string)); - } - - return string.charAt(0); + return StringCodecHelper.decodeChar(reader); } @Override diff --git a/bson/src/main/org/bson/codecs/DoubleCodec.java b/bson/src/main/org/bson/codecs/DoubleCodec.java index 523042bb163..33e3f6782bd 100644 --- a/bson/src/main/org/bson/codecs/DoubleCodec.java +++ b/bson/src/main/org/bson/codecs/DoubleCodec.java @@ -19,7 +19,7 @@ import org.bson.BsonReader; import org.bson.BsonWriter; -import static org.bson.codecs.NumberCodecHelper.decodeDouble; +import static org.bson.internal.NumberCodecHelper.decodeDouble; /** * Encodes and decodes {@code Double} objects. diff --git a/bson/src/main/org/bson/codecs/FloatCodec.java b/bson/src/main/org/bson/codecs/FloatCodec.java index 84b85c5aa1b..49dc7e22aff 100644 --- a/bson/src/main/org/bson/codecs/FloatCodec.java +++ b/bson/src/main/org/bson/codecs/FloatCodec.java @@ -16,12 +16,10 @@ package org.bson.codecs; -import org.bson.BsonInvalidOperationException; import org.bson.BsonReader; import org.bson.BsonWriter; -import static java.lang.String.format; -import static org.bson.codecs.NumberCodecHelper.decodeDouble; +import static org.bson.internal.NumberCodecHelper.decodeFloat; /** * Encodes and decodes {@code Float} objects. 
@@ -37,11 +35,7 @@ public void encode(final BsonWriter writer, final Float value, final EncoderCont @Override public Float decode(final BsonReader reader, final DecoderContext decoderContext) { - double value = decodeDouble(reader); - if (value < -Float.MAX_VALUE || value > Float.MAX_VALUE) { - throw new BsonInvalidOperationException(format("%s can not be converted into a Float.", value)); - } - return (float) value; + return decodeFloat(reader); } @Override diff --git a/bson/src/main/org/bson/codecs/IntegerCodec.java b/bson/src/main/org/bson/codecs/IntegerCodec.java index dee6e2512fb..bb0c5c082d5 100644 --- a/bson/src/main/org/bson/codecs/IntegerCodec.java +++ b/bson/src/main/org/bson/codecs/IntegerCodec.java @@ -19,7 +19,7 @@ import org.bson.BsonReader; import org.bson.BsonWriter; -import static org.bson.codecs.NumberCodecHelper.decodeInt; +import static org.bson.internal.NumberCodecHelper.decodeInt; /** * Encodes and decodes {@code Integer} objects. diff --git a/bson/src/main/org/bson/codecs/LongCodec.java b/bson/src/main/org/bson/codecs/LongCodec.java index 29adc373488..0e16e4430bc 100644 --- a/bson/src/main/org/bson/codecs/LongCodec.java +++ b/bson/src/main/org/bson/codecs/LongCodec.java @@ -19,7 +19,7 @@ import org.bson.BsonReader; import org.bson.BsonWriter; -import static org.bson.codecs.NumberCodecHelper.decodeLong; +import static org.bson.internal.NumberCodecHelper.decodeLong; /** * Encodes and decodes {@code Long} objects. diff --git a/bson/src/main/org/bson/codecs/ShortCodec.java b/bson/src/main/org/bson/codecs/ShortCodec.java index e5aaf8f9acb..8c439e36b8d 100644 --- a/bson/src/main/org/bson/codecs/ShortCodec.java +++ b/bson/src/main/org/bson/codecs/ShortCodec.java @@ -16,12 +16,10 @@ package org.bson.codecs; -import org.bson.BsonInvalidOperationException; import org.bson.BsonReader; import org.bson.BsonWriter; -import static java.lang.String.format; -import static org.bson.codecs.NumberCodecHelper.decodeInt; +import static org.bson.internal.NumberCodecHelper.decodeShort; /** * Encodes and decodes {@code Short} objects. @@ -37,11 +35,7 @@ public void encode(final BsonWriter writer, final Short value, final EncoderCont @Override public Short decode(final BsonReader reader, final DecoderContext decoderContext) { - int value = decodeInt(reader); - if (value < Short.MIN_VALUE || value > Short.MAX_VALUE) { - throw new BsonInvalidOperationException(format("%s can not be converted into a Short.", value)); - } - return (short) value; + return decodeShort(reader); } @Override diff --git a/bson/src/main/org/bson/codecs/NumberCodecHelper.java b/bson/src/main/org/bson/internal/NumberCodecHelper.java similarity index 78% rename from bson/src/main/org/bson/codecs/NumberCodecHelper.java rename to bson/src/main/org/bson/internal/NumberCodecHelper.java index 69dfe29ac7e..faf63e56eb5 100644 --- a/bson/src/main/org/bson/codecs/NumberCodecHelper.java +++ b/bson/src/main/org/bson/internal/NumberCodecHelper.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.bson.codecs; +package org.bson.internal; import org.bson.BsonInvalidOperationException; import org.bson.BsonReader; @@ -25,9 +25,28 @@ import static java.lang.String.format; -final class NumberCodecHelper { +/** + * This class is not part of the public API. It may be removed or changed at any time. 
+ */ +public final class NumberCodecHelper { + + public static byte decodeByte(final BsonReader reader) { + int value = decodeInt(reader); + if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { + throw new BsonInvalidOperationException(format("%s can not be converted into a Byte.", value)); + } + return (byte) value; + } + + public static short decodeShort(final BsonReader reader) { + int value = decodeInt(reader); + if (value < Short.MIN_VALUE || value > Short.MAX_VALUE) { + throw new BsonInvalidOperationException(format("%s can not be converted into a Short.", value)); + } + return (short) value; + } - static int decodeInt(final BsonReader reader) { + public static int decodeInt(final BsonReader reader) { int intValue; BsonType bsonType = reader.getCurrentBsonType(); switch (bsonType) { @@ -61,7 +80,7 @@ static int decodeInt(final BsonReader reader) { return intValue; } - static long decodeLong(final BsonReader reader) { + public static long decodeLong(final BsonReader reader) { long longValue; BsonType bsonType = reader.getCurrentBsonType(); switch (bsonType) { @@ -91,7 +110,15 @@ static long decodeLong(final BsonReader reader) { return longValue; } - static double decodeDouble(final BsonReader reader) { + public static float decodeFloat(final BsonReader reader) { + double value = decodeDouble(reader); + if (value < -Float.MAX_VALUE || value > Float.MAX_VALUE) { + throw new BsonInvalidOperationException(format("%s can not be converted into a Float.", value)); + } + return (float) value; + } + + public static double decodeDouble(final BsonReader reader) { double doubleValue; BsonType bsonType = reader.getCurrentBsonType(); switch (bsonType) { diff --git a/bson/src/main/org/bson/internal/StringCodecHelper.java b/bson/src/main/org/bson/internal/StringCodecHelper.java new file mode 100644 index 00000000000..04225aad939 --- /dev/null +++ b/bson/src/main/org/bson/internal/StringCodecHelper.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal; + +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonType; + +import static java.lang.String.format; + +/** + * This class is not part of the public API. It may be removed or changed at any time. 
+ */ +public final class StringCodecHelper { + + private StringCodecHelper(){ + //NOP + } + + public static char decodeChar(final BsonReader reader) { + BsonType currentBsonType = reader.getCurrentBsonType(); + if (currentBsonType != BsonType.STRING) { + throw new BsonInvalidOperationException(format("Invalid string type, found: %s", currentBsonType)); + } + String string = reader.readString(); + if (string.length() != 1) { + throw new BsonInvalidOperationException(format("Attempting to decode the string '%s' to a character, but its length is not " + + "equal to one", string)); + } + return string.charAt(0); + } +} diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml index 1ef5de78bf5..09af427f8d9 100644 --- a/config/spotbugs/exclude.xml +++ b/config/spotbugs/exclude.xml @@ -217,6 +217,12 @@ + + + + + + diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml index 09af427f8d9..fedf0c72566 100644 --- a/config/spotbugs/exclude.xml +++ b/config/spotbugs/exclude.xml @@ -229,7 +229,7 @@ --> - + @@ -239,4 +239,25 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/driver-core/build.gradle b/driver-core/build.gradle index 40a63c15d49..1f7d06f93f2 100644 --- a/driver-core/build.gradle +++ b/driver-core/build.gradle @@ -58,6 +58,7 @@ dependencies { implementation "org.mongodb:mongodb-crypt:$mongoCryptVersion", optional testImplementation project(':bson').sourceSets.test.output + testImplementation('org.junit.jupiter:junit-jupiter-api') testRuntimeOnly "io.netty:netty-tcnative-boringssl-static" classifiers.forEach { diff --git a/driver-core/src/main/com/mongodb/AwsCredential.java b/driver-core/src/main/com/mongodb/AwsCredential.java index dfd6c86776c..2fd6f8fb6f4 100644 --- a/driver-core/src/main/com/mongodb/AwsCredential.java +++ b/driver-core/src/main/com/mongodb/AwsCredential.java @@ -17,6 +17,7 @@ package com.mongodb; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import static com.mongodb.assertions.Assertions.notNull; @@ -28,7 +29,7 @@ * @see MongoCredential#AWS_CREDENTIAL_PROVIDER_KEY * @since 4.4 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public final class AwsCredential { private final String accessKeyId; private final String secretAccessKey; diff --git a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java index 2df4b3363d4..ee9b88817e7 100644 --- a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java +++ b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java @@ -16,15 +16,21 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; import javax.net.ssl.SSLContext; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout; import static java.util.Collections.unmodifiableMap; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * The client-side settings for data key creation and explicit encryption. 
@@ -42,6 +48,8 @@ public final class ClientEncryptionSettings { private final Map> kmsProviders; private final Map>> kmsProviderPropertySuppliers; private final Map kmsProviderSslContextMap; + @Nullable + private final Long timeoutMS; /** * A builder for {@code ClientEncryptionSettings} so that {@code ClientEncryptionSettings} can be immutable, and to support easier * construction through chaining. @@ -53,6 +61,8 @@ public static final class Builder { private Map> kmsProviders; private Map>> kmsProviderPropertySuppliers = new HashMap<>(); private Map kmsProviderSslContextMap = new HashMap<>(); + @Nullable + private Long timeoutMS; /** * Sets the {@link MongoClientSettings} that will be used to access the key vault. @@ -120,6 +130,43 @@ public Builder kmsProviderSslContextMap(final Map kmsProvide return this; } + /** + * Sets the time limit for the full execution of an operation. + * + * + * + *

+ * Note: The timeout set through this method overrides the timeout defined in the key vault client settings
+ * specified in {@link #keyVaultMongoClientSettings(MongoClientSettings)}.
+ * Essentially, for operations that require accessing the key vault, the remaining timeout from the initial operation
+ * determines the duration allowed for key vault access.
    + * + * @param timeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + public ClientEncryptionSettings.Builder timeout(final long timeout, final TimeUnit timeUnit) { + this.timeoutMS = convertAndValidateTimeout(timeout, timeUnit); + return this; + } + /** * Build an instance of {@code ClientEncryptionSettings}. * @@ -253,12 +300,46 @@ public Map getKmsProviderSslContextMap() { return unmodifiableMap(kmsProviderSslContextMap); } + /** + * The time limit for the full execution of an operation. + * + *

+ * If set the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+ *   • {@code null} means that the timeout mechanism for operations will defer to using:
+ *       • {@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *         available
+ *       • {@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.
+ *       • {@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.
+ *       • {@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *         See: cursor.maxTimeMS.
+ *       • {@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *         See: {@link TransactionOptions#getMaxCommitTime}.
+ *   • {@code 0} means infinite timeout.
+ *   • {@code > 0} The time limit to use for the full execution of an operation.
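For orientation, here is a minimal sketch of how the new builder option and accessor above might be used together; the key vault namespace, the placeholder local KMS key, and the 30-second limit are illustrative assumptions, not values taken from this patch.

    import com.mongodb.ClientEncryptionSettings;
    import com.mongodb.MongoClientSettings;

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    public class ClientEncryptionTimeoutSketch {
        public static void main(String[] args) {
            // Hypothetical KMS provider configuration; a real "local" provider expects a 96-byte master key.
            Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
            Map<String, Object> localProvider = new HashMap<>();
            localProvider.put("key", new byte[96]);
            kmsProviders.put("local", localProvider);

            ClientEncryptionSettings settings = ClientEncryptionSettings.builder()
                    .keyVaultMongoClientSettings(MongoClientSettings.builder().build())
                    .keyVaultNamespace("encryption.__keyVault")   // assumed namespace, illustration only
                    .kmsProviders(kmsProviders)
                    .timeout(30, TimeUnit.SECONDS)                // new in 5.2 (@Alpha)
                    .build();

            // getTimeout reports the limit in the requested unit, or null when no timeout was set.
            System.out.println(settings.getTimeout(TimeUnit.MILLISECONDS)); // 30000
        }
    }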
    + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getTimeout(final TimeUnit timeUnit) { + return timeoutMS == null ? null : timeUnit.convert(timeoutMS, MILLISECONDS); + } + private ClientEncryptionSettings(final Builder builder) { this.keyVaultMongoClientSettings = notNull("keyVaultMongoClientSettings", builder.keyVaultMongoClientSettings); this.keyVaultNamespace = notNull("keyVaultNamespace", builder.keyVaultNamespace); this.kmsProviders = notNull("kmsProviders", builder.kmsProviders); this.kmsProviderPropertySuppliers = notNull("kmsProviderPropertySuppliers", builder.kmsProviderPropertySuppliers); this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", builder.kmsProviderSslContextMap); + this.timeoutMS = builder.timeoutMS; } } diff --git a/driver-core/src/main/com/mongodb/ClientSessionOptions.java b/driver-core/src/main/com/mongodb/ClientSessionOptions.java index 7a272016006..160d16c3486 100644 --- a/driver-core/src/main/com/mongodb/ClientSessionOptions.java +++ b/driver-core/src/main/com/mongodb/ClientSessionOptions.java @@ -16,14 +16,19 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import com.mongodb.session.ClientSession; import java.util.Objects; +import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * The options to apply to a {@code ClientSession}. @@ -38,6 +43,7 @@ public final class ClientSessionOptions { private final Boolean causallyConsistent; private final Boolean snapshot; + private final Long defaultTimeoutMS; private final TransactionOptions defaultTransactionOptions; /** @@ -77,6 +83,25 @@ public TransactionOptions getDefaultTransactionOptions() { return defaultTransactionOptions; } + /** + * Gets the default time limit for the following operations executed on the session: + * + *
+ *   • {@code commitTransaction}
+ *   • {@code abortTransaction}
+ *   • {@code withTransaction}
+ *   • {@code close}
    + * @param timeUnit the time unit + * @return the default timeout + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getDefaultTimeout(final TimeUnit timeUnit) { + return defaultTimeoutMS == null ? null : timeUnit.convert(defaultTimeoutMS, MILLISECONDS); + } + @Override public boolean equals(final Object o) { if (this == o) { @@ -85,36 +110,24 @@ public boolean equals(final Object o) { if (o == null || getClass() != o.getClass()) { return false; } - - ClientSessionOptions that = (ClientSessionOptions) o; - - if (!Objects.equals(causallyConsistent, that.causallyConsistent)) { - return false; - } - - if (!Objects.equals(snapshot, that.snapshot)) { - return false; - } - if (!Objects.equals(defaultTransactionOptions, that.defaultTransactionOptions)) { - return false; - } - - return true; + final ClientSessionOptions that = (ClientSessionOptions) o; + return Objects.equals(causallyConsistent, that.causallyConsistent) + && Objects.equals(snapshot, that.snapshot) + && Objects.equals(defaultTimeoutMS, that.defaultTimeoutMS) + && Objects.equals(defaultTransactionOptions, that.defaultTransactionOptions); } @Override public int hashCode() { - int result = causallyConsistent != null ? causallyConsistent.hashCode() : 0; - result = 31 * result + (snapshot != null ? snapshot.hashCode() : 0); - result = 31 * result + (defaultTransactionOptions != null ? defaultTransactionOptions.hashCode() : 0); - return result; + return Objects.hash(causallyConsistent, snapshot, defaultTimeoutMS, defaultTransactionOptions); } @Override public String toString() { return "ClientSessionOptions{" + "causallyConsistent=" + causallyConsistent - + "snapshot=" + snapshot + + ", snapshot=" + snapshot + + ", defaultTimeoutMS=" + defaultTimeoutMS + ", defaultTransactionOptions=" + defaultTransactionOptions + '}'; } @@ -141,6 +154,7 @@ public static Builder builder(final ClientSessionOptions options) { builder.causallyConsistent = options.isCausallyConsistent(); builder.snapshot = options.isSnapshot(); builder.defaultTransactionOptions = options.getDefaultTransactionOptions(); + builder.defaultTimeoutMS = options.defaultTimeoutMS; return builder; } @@ -151,6 +165,7 @@ public static Builder builder(final ClientSessionOptions options) { public static final class Builder { private Boolean causallyConsistent; private Boolean snapshot; + private Long defaultTimeoutMS; private TransactionOptions defaultTransactionOptions = TransactionOptions.builder().build(); /** @@ -196,6 +211,27 @@ public Builder defaultTransactionOptions(final TransactionOptions defaultTransac return this; } + /** + * Sets the default time limit for the following operations executed on the session: + * + *
+ *   • {@code commitTransaction}
+ *   • {@code abortTransaction}
+ *   • {@code withTransaction}
+ *   • {@code close}
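A brief sketch of configuring the per-session default described above; the 10-second limit and the causal-consistency flag are arbitrary illustrative choices rather than values taken from this patch.

    import com.mongodb.ClientSessionOptions;

    import java.util.concurrent.TimeUnit;

    public class SessionDefaultTimeoutSketch {
        public static void main(String[] args) {
            // The default applies to commitTransaction, abortTransaction, withTransaction and close,
            // as listed in the Javadoc above.
            ClientSessionOptions options = ClientSessionOptions.builder()
                    .causallyConsistent(true)
                    .defaultTimeout(10, TimeUnit.SECONDS)   // new in 5.2 (@Alpha)
                    .build();

            System.out.println(options.getDefaultTimeout(TimeUnit.MILLISECONDS)); // 10000
        }
    }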
    + * @param defaultTimeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + * @see #getDefaultTimeout + */ + @Alpha(Reason.CLIENT) + public Builder defaultTimeout(final long defaultTimeout, final TimeUnit timeUnit) { + this.defaultTimeoutMS = convertAndValidateTimeout(defaultTimeout, timeUnit, "defaultTimeout"); + return this; + } + /** * Build the session options instance. * @@ -218,5 +254,6 @@ private ClientSessionOptions(final Builder builder) { : Boolean.valueOf(!builder.snapshot); this.snapshot = builder.snapshot; this.defaultTransactionOptions = builder.defaultTransactionOptions; + this.defaultTimeoutMS = builder.defaultTimeoutMS; } } diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java index 17a990ea127..f779ab7290d 100644 --- a/driver-core/src/main/com/mongodb/ConnectionString.java +++ b/driver-core/src/main/com/mongodb/ConnectionString.java @@ -16,6 +16,8 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.connection.ClusterSettings; import com.mongodb.connection.ConnectionPoolSettings; import com.mongodb.connection.ServerMonitoringMode; @@ -139,9 +141,12 @@ *
 *   • {@code sslInvalidHostNameAllowed=true|false}: Whether to allow invalid host names for TLS connections.
 *   • {@code tlsAllowInvalidHostnames=true|false}: Whether to allow invalid host names for TLS connections. Supersedes the
 *     sslInvalidHostNameAllowed option
+ *   • {@code timeoutMS=ms}: Time limit for the full execution of an operation. Note: This parameter is part of an {@linkplain Alpha Alpha API} and may be
+ *     subject to changes or even removal in future releases.
 *   • {@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.
 *   • {@code socketTimeoutMS=ms}: How long a receive on a socket can take before timing out.
- *     This option is the same as {@link SocketSettings#getReadTimeout(TimeUnit)}.
+ *     This option is the same as {@link SocketSettings#getReadTimeout(TimeUnit)}.
+ *     Deprecated, use {@code timeoutMS} instead.
 *   • {@code maxIdleTimeMS=ms}: Maximum idle time of a pooled connection. A connection that exceeds this limit will be closed
 *   • {@code maxLifeTimeMS=ms}: Maximum life time of a pooled connection. A connection that exceeds this limit will be closed
 *   • {@code waitQueueTimeoutMS=ms}: The maximum duration to wait until either:
 *     an {@linkplain ConnectionCheckedOutEvent in-use connection} becomes {@linkplain ConnectionCheckedInEvent available},
 *     or a {@linkplain ConnectionCreatedEvent connection is created} and begins to be {@linkplain ConnectionReadyEvent established}.
- *     See {@link #getMaxWaitTime()} for more details.
+ *     See {@link #getMaxWaitTime()} for more details. Deprecated, use {@code timeoutMS} instead.
 *   • {@code maxConnecting=n}: The maximum number of connections a pool may be establishing concurrently.
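To illustrate the new timeoutMS option next to one of the deprecated ones, here is a hedged sketch of parsing such a connection string; the host name and the numeric values are assumptions for illustration only.

    import com.mongodb.ConnectionString;

    public class ConnectionStringTimeoutSketch {
        public static void main(String[] args) {
            // timeoutMS is the new client-side operation timeout option; socketTimeoutMS is deprecated
            // and, per the warnDeprecatedTimeouts change later in this patch, now logs a warning.
            ConnectionString connectionString = new ConnectionString(
                    "mongodb://db.example.com:27017/?timeoutMS=5000&socketTimeoutMS=2000");

            // getTimeout() returns the limit in milliseconds as a Long, or null when timeoutMS was not given.
            System.out.println(connectionString.getTimeout()); // 5000
        }
    }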

    Write concern configuration:

    @@ -189,7 +194,7 @@ *
 *   • {@code wtimeoutMS=ms}
 *       • The driver adds { wtimeout : ms } to all write commands. Implies {@code safe=true}.
- *       • Used in combination with {@code w}
+ *       • Used in combination with {@code w}. Deprecated, use {@code timeoutMS} instead
  • * @@ -311,6 +316,7 @@ public class ConnectionString { private Integer maxConnectionLifeTime; private Integer maxConnecting; private Integer connectTimeout; + private Long timeout; private Integer socketTimeout; private Boolean sslEnabled; private Boolean sslInvalidHostnameAllowed; @@ -503,6 +509,7 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient credential = createCredentials(combinedOptionsMaps, userName, password); warnOnUnsupportedOptions(combinedOptionsMaps); + warnDeprecatedTimeouts(combinedOptionsMaps); } private static final Set GENERAL_OPTIONS_KEYS = new LinkedHashSet<>(); @@ -511,16 +518,18 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient private static final Set WRITE_CONCERN_KEYS = new HashSet<>(); private static final Set COMPRESSOR_KEYS = new HashSet<>(); private static final Set ALL_KEYS = new HashSet<>(); + private static final Set DEPRECATED_TIMEOUT_KEYS = new HashSet<>(); static { GENERAL_OPTIONS_KEYS.add("minpoolsize"); GENERAL_OPTIONS_KEYS.add("maxpoolsize"); + GENERAL_OPTIONS_KEYS.add("timeoutms"); + GENERAL_OPTIONS_KEYS.add("sockettimeoutms"); GENERAL_OPTIONS_KEYS.add("waitqueuetimeoutms"); GENERAL_OPTIONS_KEYS.add("connecttimeoutms"); GENERAL_OPTIONS_KEYS.add("maxidletimems"); GENERAL_OPTIONS_KEYS.add("maxlifetimems"); GENERAL_OPTIONS_KEYS.add("maxconnecting"); - GENERAL_OPTIONS_KEYS.add("sockettimeoutms"); // Order matters here: Having tls after ssl means than the tls option will supersede the ssl option when both are set GENERAL_OPTIONS_KEYS.add("ssl"); @@ -583,6 +592,10 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient ALL_KEYS.addAll(READ_PREFERENCE_KEYS); ALL_KEYS.addAll(WRITE_CONCERN_KEYS); ALL_KEYS.addAll(COMPRESSOR_KEYS); + + DEPRECATED_TIMEOUT_KEYS.add("sockettimeoutms"); + DEPRECATED_TIMEOUT_KEYS.add("waitqueuetimeoutms"); + DEPRECATED_TIMEOUT_KEYS.add("wtimeoutms"); } // Any options contained in the connection string completely replace the corresponding options specified in TXT records, @@ -596,15 +609,23 @@ private Map> combineOptionsMaps(final Map> optionsMap) { - for (final String key : optionsMap.keySet()) { - if (!ALL_KEYS.contains(key)) { - if (LOGGER.isWarnEnabled()) { - LOGGER.warn(format("Connection string contains unsupported option '%s'.", key)); - } - } + if (LOGGER.isWarnEnabled()) { + optionsMap.keySet() + .stream() + .filter(k -> !ALL_KEYS.contains(k)) + .forEach(k -> LOGGER.warn(format("Connection string contains unsupported option '%s'.", k))); + } + } + private void warnDeprecatedTimeouts(final Map> optionsMap) { + if (LOGGER.isWarnEnabled()) { + optionsMap.keySet() + .stream() + .filter(DEPRECATED_TIMEOUT_KEYS::contains) + .forEach(k -> LOGGER.warn(format("Use of deprecated timeout option: '%s'. 
Prefer 'timeoutMS' instead.", k))); } } + private void translateOptions(final Map> optionsMap) { boolean tlsInsecureSet = false; boolean tlsAllowInvalidHostnamesSet = false; @@ -639,6 +660,9 @@ private void translateOptions(final Map> optionsMap) { case "sockettimeoutms": socketTimeout = parseInteger(value, "sockettimeoutms"); break; + case "timeoutms": + timeout = parseLong(value, "timeoutms"); + break; case "proxyhost": proxyHost = value; break; @@ -1159,6 +1183,15 @@ private int parseInteger(final String input, final String key) { } } + private long parseLong(final String input, final String key) { + try { + return Long.parseLong(input); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(format("The connection string contains an invalid value for '%s'. " + + "'%s' is not a valid long", key, input)); + } + } + private List parseHosts(final List rawHosts) { if (rawHosts.size() == 0){ throw new IllegalArgumentException("The connection string must contain at least one host"); @@ -1533,6 +1566,38 @@ public Integer getMaxConnecting() { return maxConnecting; } + /** + * The time limit for the full execution of an operation in milliseconds. + * + *

+ * If set the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+ *   • {@code null} means that the timeout mechanism for operations will defer to using:
+ *       • {@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *         available
+ *       • {@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.
+ *       • {@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.
+ *       • {@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *         See: cursor.maxTimeMS.
+ *       • {@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *         See: {@link TransactionOptions#getMaxCommitTime}.
+ *   • {@code 0} means infinite timeout.
+ *   • {@code > 0} The time limit to use for the full execution of an operation.
    + * + * @return the time limit for the full execution of an operation in milliseconds or null. + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getTimeout() { + return timeout; + } + /** * Gets the socket connect timeout specified in the connection string. * @return the socket connect timeout @@ -1737,6 +1802,7 @@ public boolean equals(final Object o) { && Objects.equals(maxConnectionLifeTime, that.maxConnectionLifeTime) && Objects.equals(maxConnecting, that.maxConnecting) && Objects.equals(connectTimeout, that.connectTimeout) + && Objects.equals(timeout, that.timeout) && Objects.equals(socketTimeout, that.socketTimeout) && Objects.equals(proxyHost, that.proxyHost) && Objects.equals(proxyPort, that.proxyPort) @@ -1760,7 +1826,7 @@ public boolean equals(final Object o) { public int hashCode() { return Objects.hash(credential, isSrvProtocol, hosts, database, collection, directConnection, readPreference, writeConcern, retryWrites, retryReads, readConcern, minConnectionPoolSize, maxConnectionPoolSize, maxWaitTime, - maxConnectionIdleTime, maxConnectionLifeTime, maxConnecting, connectTimeout, socketTimeout, sslEnabled, + maxConnectionIdleTime, maxConnectionLifeTime, maxConnecting, connectTimeout, timeout, socketTimeout, sslEnabled, sslInvalidHostnameAllowed, requiredReplicaSetName, serverSelectionTimeout, localThreshold, heartbeatFrequency, serverMonitoringMode, applicationName, compressorList, uuidRepresentation, srvServiceName, srvMaxHosts, proxyHost, proxyPort, proxyUsername, proxyPassword); diff --git a/driver-core/src/main/com/mongodb/MongoClientSettings.java b/driver-core/src/main/com/mongodb/MongoClientSettings.java index 0d98bbe33d3..31206e56029 100644 --- a/driver-core/src/main/com/mongodb/MongoClientSettings.java +++ b/driver-core/src/main/com/mongodb/MongoClientSettings.java @@ -16,8 +16,10 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; import com.mongodb.client.gridfs.codecs.GridFSFileCodecProvider; import com.mongodb.client.model.geojson.codecs.GeoJsonCodecProvider; import com.mongodb.client.model.mql.ExpressionCodecProvider; @@ -49,9 +51,12 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.concurrent.TimeUnit; +import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.codecs.configuration.CodecRegistries.fromProviders; @@ -111,6 +116,8 @@ public final class MongoClientSettings { private final ContextProvider contextProvider; private final DnsClient dnsClient; private final InetAddressResolver inetAddressResolver; + @Nullable + private final Long timeoutMS; /** * Gets the default codec registry. 
It includes the following providers: @@ -226,6 +233,7 @@ public static final class Builder { private int heartbeatConnectTimeoutMS; private int heartbeatSocketTimeoutMS; + private Long timeoutMS; private ContextProvider contextProvider; private DnsClient dnsClient; @@ -249,6 +257,7 @@ private Builder(final MongoClientSettings settings) { uuidRepresentation = settings.getUuidRepresentation(); serverApi = settings.getServerApi(); dnsClient = settings.getDnsClient(); + timeoutMS = settings.getTimeout(MILLISECONDS); inetAddressResolver = settings.getInetAddressResolver(); transportSettings = settings.getTransportSettings(); autoEncryptionSettings = settings.getAutoEncryptionSettings(); @@ -311,6 +320,9 @@ public Builder applyConnectionString(final ConnectionString connectionString) { if (connectionString.getWriteConcern() != null) { writeConcern = connectionString.getWriteConcern(); } + if (connectionString.getTimeout() != null) { + timeoutMS = connectionString.getTimeout(); + } return this; } @@ -666,6 +678,39 @@ public Builder inetAddressResolver(@Nullable final InetAddressResolver inetAddre return this; } + + /** + * Sets the time limit for the full execution of an operation. + * + *
      + *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
    + * + * @param timeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + public Builder timeout(final long timeout, final TimeUnit timeUnit) { + this.timeoutMS = convertAndValidateTimeout(timeout, timeUnit); + return this; + } + // Package-private to provide interop with MongoClientOptions Builder heartbeatConnectTimeoutMS(final int heartbeatConnectTimeoutMS) { this.heartbeatConnectTimeoutMS = heartbeatConnectTimeoutMS; @@ -846,6 +891,39 @@ public ServerApi getServerApi() { return serverApi; } + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If set the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
    + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getTimeout(final TimeUnit timeUnit) { + return timeoutMS == null ? null : timeUnit.convert(timeoutMS, MILLISECONDS); + } + /** * Gets the auto-encryption settings. *

    @@ -996,7 +1074,8 @@ public boolean equals(final Object o) { && Objects.equals(autoEncryptionSettings, that.autoEncryptionSettings) && Objects.equals(dnsClient, that.dnsClient) && Objects.equals(inetAddressResolver, that.inetAddressResolver) - && Objects.equals(contextProvider, that.contextProvider); + && Objects.equals(contextProvider, that.contextProvider) + && Objects.equals(timeoutMS, that.timeoutMS); } @Override @@ -1005,7 +1084,8 @@ public int hashCode() { commandListeners, codecRegistry, loggerSettings, clusterSettings, socketSettings, heartbeatSocketSettings, connectionPoolSettings, serverSettings, sslSettings, applicationName, compressorList, uuidRepresentation, serverApi, autoEncryptionSettings, heartbeatSocketTimeoutSetExplicitly, - heartbeatConnectTimeoutSetExplicitly, dnsClient, inetAddressResolver, contextProvider); + heartbeatConnectTimeoutSetExplicitly, dnsClient, inetAddressResolver, contextProvider, timeoutMS); + } @Override @@ -1035,10 +1115,12 @@ public String toString() { + ", dnsClient=" + dnsClient + ", inetAddressResolver=" + inetAddressResolver + ", contextProvider=" + contextProvider + + ", timeoutMS=" + timeoutMS + '}'; } private MongoClientSettings(final Builder builder) { + isTrue("timeoutMS > 0 ", builder.timeoutMS == null || builder.timeoutMS >= 0); readPreference = builder.readPreference; writeConcern = builder.writeConcern; retryWrites = builder.retryWrites; @@ -1073,5 +1155,6 @@ private MongoClientSettings(final Builder builder) { heartbeatSocketTimeoutSetExplicitly = builder.heartbeatSocketTimeoutMS != 0; heartbeatConnectTimeoutSetExplicitly = builder.heartbeatConnectTimeoutMS != 0; contextProvider = builder.contextProvider; + timeoutMS = builder.timeoutMS; } } diff --git a/driver-core/src/main/com/mongodb/MongoCredential.java b/driver-core/src/main/com/mongodb/MongoCredential.java index 8f731027cf4..f55251a7603 100644 --- a/driver-core/src/main/com/mongodb/MongoCredential.java +++ b/driver-core/src/main/com/mongodb/MongoCredential.java @@ -19,6 +19,7 @@ import com.mongodb.annotations.Beta; import com.mongodb.annotations.Evolving; import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import java.time.Duration; @@ -182,7 +183,7 @@ public final class MongoCredential { * @see AwsCredential * @since 4.4 */ - @Beta(Beta.Reason.CLIENT) + @Beta(Reason.CLIENT) public static final String AWS_CREDENTIAL_PROVIDER_KEY = "AWS_CREDENTIAL_PROVIDER"; /** diff --git a/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java b/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java index a48328b5ca9..e257991ccda 100644 --- a/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java +++ b/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java @@ -16,6 +16,8 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import org.bson.BsonDocument; /** @@ -26,6 +28,18 @@ public class MongoExecutionTimeoutException extends MongoException { private static final long serialVersionUID = 5955669123800274594L; + /** + * Construct a new instance. + * + * @param message the error message + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public MongoExecutionTimeoutException(final String message) { + super(message); + + } + /** * Construct a new instance. 
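A minimal usage sketch of the client-wide timeout option introduced in the MongoClientSettings changes above; the connection string, host and the 2-second value are illustrative assumptions, not part of this patch:

    import com.mongodb.ConnectionString;
    import com.mongodb.MongoClientSettings;

    import java.util.concurrent.TimeUnit;

    public final class ClientTimeoutExample {
        public static void main(final String[] args) {
            // Configure the new client-wide limit for the full execution of each operation.
            MongoClientSettings settings = MongoClientSettings.builder()
                    .applyConnectionString(new ConnectionString("mongodb://localhost:27017")) // illustrative host
                    .timeout(2, TimeUnit.SECONDS) // new Alpha API in 5.2; 0 would mean an infinite timeout
                    .build();

            // Read the value back in any unit; null means the option was never set.
            Long timeoutMs = settings.getTimeout(TimeUnit.MILLISECONDS);
            System.out.println("timeoutMS = " + timeoutMs);
        }
    }

Because the option is nullable, applications that never call timeout() keep the legacy behaviour of the deprecated per-operation options described in the javadoc above.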
* diff --git a/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java b/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java new file mode 100644 index 00000000000..707df3e7b73 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; + +import java.util.concurrent.TimeUnit; + +/** + * Exception thrown to indicate that a MongoDB operation has exceeded the specified timeout for + * the full execution of operation. + * + *

+ * <p>The {@code MongoOperationTimeoutException} might provide information about the underlying
+ * cause of the timeout, if available. For example, if retries are attempted due to transient failures,
+ * and a timeout occurs in any of the attempts, the exception from one of the retries may be appended
+ * as the cause to this {@code MongoOperationTimeoutException}.</p>
+ *

+ * <p>The key difference between {@code MongoOperationTimeoutException} and {@code MongoExecutionTimeoutException}
+ * lies in the nature of these exceptions. {@code MongoExecutionTimeoutException} indicates a server-side timeout
+ * capped by a user-specified number. These server errors are transformed into the new {@code MongoOperationTimeoutException}.
+ * On the other hand, {@code MongoOperationTimeoutException} denotes a timeout during the execution of the entire operation.</p>
+ *
+ * @see MongoClientSettings.Builder#timeout(long, TimeUnit)
+ * @see MongoClientSettings#getTimeout(TimeUnit)
+ * @since 5.2
+ */
+@Alpha(Reason.CLIENT)
+public final class MongoOperationTimeoutException extends MongoTimeoutException {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Construct a new instance.
+     *
+     * @param message the message
+     */
+    public MongoOperationTimeoutException(final String message) {
+        super(message);
+    }
+
+    /**
+     * Construct a new instance.
+     * @param message the message
+     * @param cause the cause
+     */
+    public MongoOperationTimeoutException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java b/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java
new file mode 100644
index 00000000000..bd95430e595
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+
+/**
+ * This exception is thrown when there is a timeout writing to the socket.
+ *
+ * @since 5.2
+ */
+@Alpha(Reason.CLIENT)
+public class MongoSocketWriteTimeoutException extends MongoSocketException {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Construct a new instance
+     *
+     * @param message the message
+     * @param address the address
+     * @param cause the cause
+     */
+    public MongoSocketWriteTimeoutException(final String message, final ServerAddress address, final Throwable cause) {
+        super(message, address, cause);
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/MongoTimeoutException.java b/driver-core/src/main/com/mongodb/MongoTimeoutException.java
index ff9623b09f0..e2cce02403a 100644
--- a/driver-core/src/main/com/mongodb/MongoTimeoutException.java
+++ b/driver-core/src/main/com/mongodb/MongoTimeoutException.java
@@ -16,6 +16,9 @@ package com.mongodb;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+
 /**
  * An exception indicating that the driver has timed out waiting for either a server or a connection to become available.
*/ @@ -31,4 +34,15 @@ public class MongoTimeoutException extends MongoClientException { public MongoTimeoutException(final String message) { super(message); } + + /** + * Construct a new instance + * @param message the message + * @param cause the cause + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public MongoTimeoutException(final String message, final Throwable cause) { + super(message, cause); + } } diff --git a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java index 1db6b4eba07..c91a3c87fc5 100644 --- a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java +++ b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java @@ -16,6 +16,7 @@ package com.mongodb; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import org.bson.BsonDocument; import static com.mongodb.assertions.Assertions.assertNotNull; @@ -26,7 +27,7 @@ * * @since 4.9 */ -@Beta(Beta.Reason.SERVER) +@Beta(Reason.SERVER) public final class MongoUpdatedEncryptedFieldsException extends MongoClientException { private static final long serialVersionUID = 1; diff --git a/driver-core/src/main/com/mongodb/TransactionOptions.java b/driver-core/src/main/com/mongodb/TransactionOptions.java index e4cafe9161c..e5f22c22def 100644 --- a/driver-core/src/main/com/mongodb/TransactionOptions.java +++ b/driver-core/src/main/com/mongodb/TransactionOptions.java @@ -16,7 +16,9 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import java.util.Objects; @@ -24,6 +26,7 @@ import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeoutNullable; import static java.util.concurrent.TimeUnit.MILLISECONDS; /** @@ -42,6 +45,7 @@ public final class TransactionOptions { private final WriteConcern writeConcern; private final ReadPreference readPreference; private final Long maxCommitTimeMS; + private final Long timeoutMS; /** * Gets the read concern. @@ -91,6 +95,34 @@ public Long getMaxCommitTime(final TimeUnit timeUnit) { return timeUnit.convert(maxCommitTimeMS, MILLISECONDS); } + /** + * The time limit for the full execution of the transaction. + * + *

+     * <p>If set the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using
+     *   {@link ClientSessionOptions#getDefaultTimeout(TimeUnit)} or {@link MongoClientSettings#getTimeout(TimeUnit)}
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
    + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Nullable + @Alpha(Reason.CLIENT) + public Long getTimeout(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (timeoutMS == null) { + return null; + } + return timeUnit.convert(timeoutMS, MILLISECONDS); + } + /** * Gets an instance of a builder * @@ -120,6 +152,9 @@ public static TransactionOptions merge(final TransactionOptions options, final T .maxCommitTime(options.getMaxCommitTime(MILLISECONDS) == null ? defaultOptions.getMaxCommitTime(MILLISECONDS) : options.getMaxCommitTime(MILLISECONDS), MILLISECONDS) + .timeout(options.getTimeout(MILLISECONDS) == null + ? defaultOptions.getTimeout(MILLISECONDS) : options.getTimeout(MILLISECONDS), + MILLISECONDS) .build(); } @@ -134,6 +169,9 @@ public boolean equals(final Object o) { TransactionOptions that = (TransactionOptions) o; + if (!Objects.equals(timeoutMS, that.timeoutMS)) { + return false; + } if (!Objects.equals(maxCommitTimeMS, that.maxCommitTimeMS)) { return false; } @@ -156,6 +194,7 @@ public int hashCode() { result = 31 * result + (writeConcern != null ? writeConcern.hashCode() : 0); result = 31 * result + (readPreference != null ? readPreference.hashCode() : 0); result = 31 * result + (maxCommitTimeMS != null ? maxCommitTimeMS.hashCode() : 0); + result = 31 * result + (timeoutMS != null ? timeoutMS.hashCode() : 0); return result; } @@ -165,7 +204,8 @@ public String toString() { + "readConcern=" + readConcern + ", writeConcern=" + writeConcern + ", readPreference=" + readPreference - + ", maxCommitTimeMS" + maxCommitTimeMS + + ", maxCommitTimeMS=" + maxCommitTimeMS + + ", timeoutMS=" + timeoutMS + '}'; } @@ -177,6 +217,8 @@ public static final class Builder { private WriteConcern writeConcern; private ReadPreference readPreference; private Long maxCommitTimeMS; + @Nullable + private Long timeoutMS; /** * Sets the read concern. @@ -231,6 +273,36 @@ public Builder maxCommitTime(@Nullable final Long maxCommitTime, final TimeUnit return this; } + /** + * Sets the time limit for the full execution of the operations for this transaction. + * + *
      + *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
    + * + * @param timeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public Builder timeout(@Nullable final Long timeout, final TimeUnit timeUnit) { + this.timeoutMS = convertAndValidateTimeoutNullable(timeout, timeUnit); + return this; + } + /** * Build the transaction options instance. * @@ -250,5 +322,6 @@ private TransactionOptions(final Builder builder) { writeConcern = builder.writeConcern; readPreference = builder.readPreference; maxCommitTimeMS = builder.maxCommitTimeMS; + timeoutMS = builder.timeoutMS; } } diff --git a/driver-core/src/main/com/mongodb/annotations/Alpha.java b/driver-core/src/main/com/mongodb/annotations/Alpha.java new file mode 100644 index 00000000000..3698c7ac860 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Alpha.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2010 The Guava Authors + * Copyright 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that a public API element is in the early stages of development, subject to + * incompatible changes, or even removal, in a future release and may lack some intended features. + * An API bearing this annotation may contain known issues affecting functionality, performance, + * and stability. It is also exempt from any compatibility guarantees made by its containing library. + * + *

+ * <p>It is inadvisable for applications to use Alpha APIs in production environments or
+ * for libraries (which get included on users' CLASSPATHs, outside the library developers'
+ * control) to depend on these APIs. Alpha APIs are intended for experimental purposes only.</p>

    + */ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.ANNOTATION_TYPE, + ElementType.CONSTRUCTOR, + ElementType.FIELD, + ElementType.METHOD, + ElementType.PACKAGE, + ElementType.TYPE }) +@Documented +@Beta(Reason.CLIENT) +public @interface Alpha { + /** + * @return The reason an API element is marked with {@link Alpha}. + */ + Reason[] value(); +} diff --git a/driver-core/src/main/com/mongodb/annotations/Beta.java b/driver-core/src/main/com/mongodb/annotations/Beta.java index a44dae43cd5..55753ddc051 100644 --- a/driver-core/src/main/com/mongodb/annotations/Beta.java +++ b/driver-core/src/main/com/mongodb/annotations/Beta.java @@ -47,25 +47,10 @@ ElementType.PACKAGE, ElementType.TYPE }) @Documented -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public @interface Beta { /** * @return The reason an API element is marked with {@link Beta}. */ Reason[] value(); - - /** - * @see Beta#value() - */ - enum Reason { - /** - * The driver API is in preview. - */ - CLIENT, - /** - * The driver API relies on the server API, which is in preview. - * We still may decide to change the driver API even if the server API stays unchanged. - */ - SERVER - } } diff --git a/driver-core/src/main/com/mongodb/annotations/Reason.java b/driver-core/src/main/com/mongodb/annotations/Reason.java new file mode 100644 index 00000000000..af72098a9de --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Reason.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.annotations; + +/** + * Enumerates the reasons an API element might be marked with annotations like {@link Alpha} or {@link Beta}. + */ +@Beta(Reason.CLIENT) +public enum Reason { + /** + * Indicates that the status of the driver API is the reason for the annotation. + */ + CLIENT, + + /** + * The driver API relies on the server API. + * This dependency is the reason for the annotation and suggests that changes in the server API could impact the driver API. + */ + SERVER +} diff --git a/driver-core/src/main/com/mongodb/assertions/Assertions.java b/driver-core/src/main/com/mongodb/assertions/Assertions.java index 9866c222c6d..a40b4e4b7b6 100644 --- a/driver-core/src/main/com/mongodb/assertions/Assertions.java +++ b/driver-core/src/main/com/mongodb/assertions/Assertions.java @@ -20,10 +20,11 @@ import com.mongodb.lang.Nullable; import java.util.Collection; +import java.util.function.Function; import java.util.function.Supplier; /** - *

<p>Design by contract assertions.</p> <p>This class is not part of the public API and may be removed or changed at any time.</p>
+ * <p>Design by contract assertions.</p>

    * All {@code assert...} methods throw {@link AssertionError} and should be used to check conditions which may be violated if and only if * the driver code is incorrect. The intended usage of this methods is the same as of the * Java {@code assert} statement. The reason @@ -104,6 +105,24 @@ public static void isTrueArgument(final String name, final boolean condition) { } } + /** + * Throw IllegalArgumentException if the condition returns false. + * + * @param msg the error message if the condition returns false + * @param supplier the supplier of the value + * @param condition the condition function + * @return the supplied value if it meets the condition + * @param the type of the supplied value + */ + public static T isTrueArgument(final String msg, final Supplier supplier, final Function condition) { + T value = doesNotThrow(supplier); + if (!condition.apply(value)) { + throw new IllegalArgumentException(msg); + } + + return value; + } + /** * Throw IllegalArgumentException if the collection contains a null value. * diff --git a/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java b/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java new file mode 100644 index 00000000000..cdaa92d4923 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.cursor; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; + +import java.util.concurrent.TimeUnit; + +/** + * The timeout mode for a cursor + * + *

+ * <p>
+ * For operations that create cursors, {@code timeoutMS} can either cap the lifetime of the cursor or be applied separately to the
+ * original operation and all next calls.
+ * </p>

    + * @see com.mongodb.MongoClientSettings#getTimeout(TimeUnit) + * @since 5.2 + */ +@Alpha(Reason.CLIENT) +public enum TimeoutMode { + + /** + * The timeout lasts for the lifetime of the cursor + */ + CURSOR_LIFETIME, + + /** + * The timeout is reset for each batch iteration of the cursor + */ + ITERATION +} diff --git a/driver-core/src/main/com/mongodb/client/cursor/package-info.java b/driver-core/src/main/com/mongodb/client/cursor/package-info.java new file mode 100644 index 00000000000..ea907688087 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/cursor/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains models and options that help describe MongoCollection operations + */ +@NonNullApi +package com.mongodb.client.cursor; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/Aggregates.java b/driver-core/src/main/com/mongodb/client/model/Aggregates.java index 08e2fb10b02..53e9e1eaf52 100644 --- a/driver-core/src/main/com/mongodb/client/model/Aggregates.java +++ b/driver-core/src/main/com/mongodb/client/model/Aggregates.java @@ -18,6 +18,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.model.densify.DensifyOptions; import com.mongodb.client.model.densify.DensifyRange; import com.mongodb.client.model.fill.FillOptions; @@ -955,7 +956,7 @@ public static Bson searchMeta(final SearchCollector collector, final SearchOptio * @mongodb.server.release 6.0.10 * @since 4.11 */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public static Bson vectorSearch( final FieldSearchPath path, final Iterable queryVector, @@ -984,7 +985,7 @@ public static Bson vectorSearch( * @mongodb.server.release 6.0.10 * @since 4.11 */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public static Bson vectorSearch( final FieldSearchPath path, final Iterable queryVector, diff --git a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java index 5aa79112871..31165688d4a 100644 --- a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java @@ -18,6 +18,7 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -353,7 +354,7 @@ public CreateCollectionOptions changeStreamPreAndPostImagesOptions( * @since 4.7 * @mongodb.server.release 7.0 */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) @Nullable public Bson getEncryptedFields() { return encryptedFields; @@ -370,7 +371,7 @@ public Bson getEncryptedFields() { * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption * @mongodb.server.release 7.0 */ - 
@Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public CreateCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) { this.encryptedFields = encryptedFields; return this; diff --git a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java index eba101ac000..537efdc1716 100644 --- a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java +++ b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java @@ -17,6 +17,7 @@ package com.mongodb.client.model; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -28,7 +29,7 @@ * * @since 4.9 */ -@Beta(Beta.Reason.SERVER) +@Beta(Reason.SERVER) public final class CreateEncryptedCollectionParams { private final String kmsProvider; @Nullable diff --git a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java index 5c904888c00..cf2dbca66c4 100644 --- a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java @@ -18,6 +18,7 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -39,7 +40,7 @@ public class DropCollectionOptions { * @since 4.7 * @mongodb.server.release 7.0 */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) @Nullable public Bson getEncryptedFields() { return encryptedFields; @@ -56,7 +57,7 @@ public Bson getEncryptedFields() { * @mongodb.server.release 7.0 * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public DropCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) { this.encryptedFields = encryptedFields; return this; diff --git a/driver-core/src/main/com/mongodb/client/model/Projections.java b/driver-core/src/main/com/mongodb/client/model/Projections.java index e92a95abf81..98fd2810ed5 100644 --- a/driver-core/src/main/com/mongodb/client/model/Projections.java +++ b/driver-core/src/main/com/mongodb/client/model/Projections.java @@ -17,6 +17,7 @@ package com.mongodb.client.model; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.model.search.FieldSearchPath; import com.mongodb.client.model.search.SearchCollector; import com.mongodb.client.model.search.SearchCount; @@ -223,7 +224,7 @@ public static Bson metaSearchScore(final String fieldName) { * @mongodb.server.release 6.0.10 * @since 4.11 */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public static Bson metaVectorSearchScore(final String fieldName) { return meta(fieldName, "vectorSearchScore"); } diff --git a/driver-core/src/main/com/mongodb/client/model/mql/Branches.java b/driver-core/src/main/com/mongodb/client/model/mql/Branches.java index 1a576cfe581..c6b414de213 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/Branches.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/Branches.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import 
com.mongodb.assertions.Assertions; import java.util.ArrayList; @@ -36,7 +37,7 @@ * @param the type of the values that may be checked. * @since 4.9.0 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public final class Branches { Branches() { diff --git a/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java b/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java index 9b1b88e4467..b068c118ad3 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.assertions.Assertions; import java.util.ArrayList; @@ -32,7 +33,7 @@ * @param the type of the value produced. * @since 4.9.0 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public final class BranchesIntermediary extends BranchesTerminal { BranchesIntermediary(final List>> branches) { super(branches, null); diff --git a/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java b/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java index f72cb5cb1f4..299942ebdbf 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import java.util.List; @@ -30,7 +31,7 @@ * @param the type of the value produced. * @since 4.9.0 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public class BranchesTerminal { private final List>> branches; diff --git a/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java b/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java index d4176b7205f..893c57c5c86 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java @@ -18,6 +18,7 @@ import com.mongodb.annotations.Beta; import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.codecs.Codec; import org.bson.codecs.configuration.CodecProvider; @@ -35,7 +36,7 @@ * * @since 4.9.0 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) @Immutable public final class ExpressionCodecProvider implements CodecProvider { @Override diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java index 047e294c8e9..e979b4687e7 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.util.function.Function; @@ -33,7 +34,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlArray extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java index 5e594a757c7..28290cf25f4 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java +++ 
b/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.util.function.Function; @@ -28,7 +29,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlBoolean extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java index 7c39057ee23..b6600aaf689 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.util.function.Function; @@ -30,7 +31,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlDate extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java index b99d5b3354b..c60fde8f82a 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.assertions.Assertions; import org.bson.conversions.Bson; @@ -40,7 +41,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlDocument extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java index bcb1f26e251..dffa35405f1 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -34,7 +35,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlEntry extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java index 0fe85fd88d9..46380b57773 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.util.function.Function; @@ -30,7 +31,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlInteger extends MqlNumber { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java index 24ee3ef405b..58a279c89c7 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import 
com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.assertions.Assertions; @@ -35,7 +36,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlMap extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java index ec3099047b8..7b6590b7624 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.assertions.Assertions; @@ -31,7 +32,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlNumber extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java index dd24a8c94a2..e5b6e8fa8bc 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.util.function.Function; @@ -30,7 +31,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlString extends MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java index 9366ce77fe9..8cb50885584 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.util.function.Function; @@ -89,7 +90,7 @@ * @since 4.9.0 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MqlValue { /** diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java index 8d791dc6b3b..a2d58fbc02b 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.assertions.Assertions; import org.bson.BsonArray; import org.bson.BsonBoolean; @@ -46,7 +47,7 @@ * * @since 4.9.0 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public final class MqlValues { private MqlValues() {} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/package-info.java b/driver-core/src/main/com/mongodb/client/model/mql/package-info.java index 08cbc6195a7..caef0925787 100644 --- a/driver-core/src/main/com/mongodb/client/model/mql/package-info.java +++ b/driver-core/src/main/com/mongodb/client/model/mql/package-info.java @@ -19,8 +19,9 @@ * @see com.mongodb.client.model.mql.MqlValues * @since 4.9.0 */ -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) @NonNullApi package com.mongodb.client.model.mql; import com.mongodb.annotations.Beta; +import 
com.mongodb.annotations.Reason; import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java index 11411ca923d..d8a2fe5e908 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface AddSearchScoreExpression extends SearchScoreExpression { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java index 2a700e6a770..447de8168cd 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -24,7 +25,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface AutocompleteSearchOperator extends SearchOperator { @Override AutocompleteSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java index 3d1549fb2fa..b12a86ae78a 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface CompoundSearchOperator extends CompoundSearchOperatorBase, SearchOperator { @Override CompoundSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java index f3fe27dbe3d..2834199a4e0 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java +++ b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -26,7 +27,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface CompoundSearchOperatorBase { /** * Creates a new {@link CompoundSearchOperator} by adding to it {@code clauses} that must all be satisfied. 
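Relating back to the TransactionOptions changes earlier in this patch, a short sketch of the per-transaction timeout; the 60-second value is an assumption chosen for illustration:

    import com.mongodb.TransactionOptions;

    import java.util.concurrent.TimeUnit;

    public final class TransactionTimeoutExample {
        public static void main(final String[] args) {
            // The builder accepts a nullable Long; null defers to the session or client timeout.
            TransactionOptions txnOptions = TransactionOptions.builder()
                    .timeout(60L, TimeUnit.SECONDS)
                    .build();

            // A null result here would mean no transaction-level timeout was configured.
            System.out.println("timeoutMS = " + txnOptions.getTimeout(TimeUnit.MILLISECONDS));
        }
    }

As the updated TransactionOptions.merge shown earlier does for maxCommitTimeMS, the per-transaction timeout is preferred when present and falls back to the default options when it is null.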
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java index 31c9cfb4c21..463df7634e3 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java +++ b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface ConstantSearchScore extends SearchScore { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java index e7ae9be59f2..691ee643572 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface ConstantSearchScoreExpression extends SearchScoreExpression { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java index 5edb7a02756..8421d058eeb 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.time.Duration; @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface DateNearSearchOperator extends SearchOperator { @Override DateNearSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java index dfa98485837..f8c654cae1d 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -24,7 +25,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface DateRangeSearchOperator extends DateRangeSearchOperatorBase, SearchOperator { @Override DateRangeSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java index b7db8c190e9..df8fbaa93d8 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java +++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java @@ -16,6 +16,7 @@ package 
com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import java.time.Instant; @@ -29,7 +30,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface DateRangeSearchOperatorBase { /** * Creates a new {@link DateRangeSearchOperator} that tests if values are within (l; ∞). diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java index 936ac3040f8..39d8bb2ddf0 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java +++ b/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface DateSearchFacet extends SearchFacet { /** * Creates a new {@link DateSearchFacet} with the default bucket specified. diff --git a/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java index cb847a49b66..847070dc3bc 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface ExistsSearchOperator extends SearchOperator { @Override ExistsSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java b/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java index 72be0245b2c..01190216633 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java +++ b/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface FacetSearchCollector extends SearchCollector { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java index cc4b89f6381..2be4cdecb90 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java +++ b/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import org.bson.conversions.Bson; @@ -26,7 +27,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface FieldSearchPath extends SearchPath { /** * Creates a new {@link FieldSearchPath} with the name of the alternate analyzer specified. 
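The sweep through these search-model classes replaces the nested {@code Beta.Reason} constants with the new top-level Reason enum. A sketch of how an annotated element looks after the migration; the interface and method names are hypothetical and only the annotations and the Reason enum come from this patch:

    import com.mongodb.annotations.Alpha;
    import com.mongodb.annotations.Beta;
    import com.mongodb.annotations.Reason;

    // Hypothetical API element used purely to illustrate the annotation style.
    @Beta({Reason.CLIENT, Reason.SERVER})
    public interface ExampleSearchOption {

        // Alpha marks APIs that are at an even earlier stage than Beta, such as the new timeout methods.
        @Alpha(Reason.CLIENT)
        ExampleSearchOption timeoutHint(long milliseconds);
    }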
diff --git a/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java index 92b414ebbc8..df23133d1a8 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface FilterCompoundSearchOperator extends CompoundSearchOperator { @Override FilterCompoundSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java index 047cf65b2e4..e2bf09bf1a5 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java +++ b/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface FunctionSearchScore extends SearchScore { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java index 7afe5fc1c8a..2acbb244537 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import org.bson.conversions.Bson; @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface FuzzySearchOptions extends Bson { /** * Creates a new {@link FuzzySearchOptions} with the maximum diff --git a/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java index 038d5973d78..b3ac5fadedb 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface GaussSearchScoreExpression extends SearchScoreExpression { /** * Creates a new {@link GaussSearchScoreExpression} which does not decay, i.e., its output stays 1, if the value of the diff --git a/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java index 5c02fce3030..1501bbd819e 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java +++ 
b/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.geojson.Point; @@ -25,7 +26,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface GeoNearSearchOperator extends SearchOperator { @Override GeoNearSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java index f1499a5de16..40ad061cbcb 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface Log1pSearchScoreExpression extends SearchScoreExpression { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java index 10ad3b9d40d..ae4e5fa8725 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface LogSearchScoreExpression extends SearchScoreExpression { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java index 888d66d50b0..15576d4a5b6 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java +++ b/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface LowerBoundSearchCount extends SearchCount { /** * Creates a new {@link LowerBoundSearchCount} that instructs to count documents up to the {@code threshold} exactly, diff --git a/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java index 31d330ba161..e6ab2332bfe 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed 
-@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MultiplySearchScoreExpression extends SearchScoreExpression { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java index e9715a9b076..d9db7f7e34b 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MustCompoundSearchOperator extends CompoundSearchOperator { @Override MustCompoundSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java index aad0bb633cc..5bdcc56009d 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface MustNotCompoundSearchOperator extends CompoundSearchOperator { @Override MustNotCompoundSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java index 1baf5f2303f..65d6ec4969e 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -24,7 +25,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface NumberNearSearchOperator extends SearchOperator { @Override NumberNearSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java index e0acad425c6..fe5d37bdc41 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -24,7 +25,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface NumberRangeSearchOperator extends NumberRangeSearchOperatorBase, SearchOperator { @Override NumberRangeSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java 
b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java index 2492f1db11c..daa31d48656 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface NumberRangeSearchOperatorBase { /** * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (l; ∞). diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java index 4fc6bc27d21..4587f688097 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface NumberSearchFacet extends SearchFacet { /** * Creates a new {@link NumberSearchFacet} with the default bucket specified. diff --git a/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java index 37c675e523b..40459fa1724 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java +++ b/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface PathBoostSearchScore extends SearchScore { /** * Creates a new {@link PathBoostSearchScore} with the value to fall back to diff --git a/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java index a144addae89..b3c14025f4e 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface PathSearchScoreExpression extends SearchScoreExpression { /** * Creates a new {@link PathSearchScoreExpression} with the value to fall back to diff --git a/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java index 89491f5c935..2a36a679ad5 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java +++ 
b/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface RelevanceSearchScoreExpression extends SearchScoreExpression { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java b/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java index a93c5690699..6f2c45b4961 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; import com.mongodb.client.model.Projections; @@ -34,7 +35,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchCollector extends Bson { /** * Returns a {@link SearchCollector} that groups results by values or ranges in the specified faceted fields and returns the count @@ -45,7 +46,7 @@ public interface SearchCollector extends Bson { * @return The requested {@link SearchCollector}. * @mongodb.atlas.manual atlas-search/facet/ facet collector */ - @Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) + @Beta({Reason.CLIENT, Reason.SERVER}) static FacetSearchCollector facet(final SearchOperator operator, final Iterable facets) { notNull("operator", operator); notNull("facets", facets); diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java index bb80a894f95..f9a5917582b 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Projections; import org.bson.BsonDocument; @@ -33,7 +34,7 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface SearchCount extends Bson { /** * Returns a {@link SearchCount} that instructs to count documents exactly. 
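The facet collector and count builders touched above keep their shape; only the Reason constants marking them as Beta change, so existing call sites are unaffected. A minimal usage sketch, assuming the existing 4.7 search builders (fieldPath, exists, stringFacet, total) and an illustrative "genre" field:

    import static com.mongodb.client.model.search.SearchCollector.facet;
    import static com.mongodb.client.model.search.SearchCount.total;
    import static com.mongodb.client.model.search.SearchFacet.stringFacet;
    import static com.mongodb.client.model.search.SearchOperator.exists;
    import static com.mongodb.client.model.search.SearchOptions.searchOptions;
    import static com.mongodb.client.model.search.SearchPath.fieldPath;
    import static java.util.Collections.singletonList;

    import com.mongodb.client.model.Aggregates;
    import org.bson.conversions.Bson;

    public final class FacetStageSketch {
        // Builds a $searchMeta stage that buckets matching documents by the
        // "genre" string field and requests an exact total count.
        static Bson searchMetaStage() {
            return Aggregates.searchMeta(
                    facet(exists(fieldPath("title")),
                            singletonList(stringFacet("genres", fieldPath("genre")))),
                    searchOptions().count(total()));
        }
    }
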
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java index fcc4e2866b8..4aac0fef089 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import org.bson.BsonDocument; import org.bson.BsonType; @@ -43,7 +44,7 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface SearchFacet extends Bson { /** * Returns a {@link SearchFacet} that allows narrowing down search results based on the most frequent diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java b/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java index c337be57e5b..6610c57590f 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Projections; import org.bson.BsonDocument; @@ -37,7 +38,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchHighlight extends Bson { /** * Creates a new {@link SearchHighlight} with the maximum number of characters to examine on a document diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java index e9fd4796234..9234db91c51 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; import com.mongodb.client.model.geojson.Point; @@ -40,7 +41,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchOperator extends Bson { /** * Creates a new {@link SearchOperator} with the scoring modifier specified. diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java index 8550c672ee5..f5cd0261e8f 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; import org.bson.conversions.Bson; @@ -29,7 +30,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchOptions extends Bson { /** * Creates a new {@link SearchOptions} with the index name specified. @@ -53,7 +54,7 @@ public interface SearchOptions extends Bson { * @param option The counting option. * @return A new {@link SearchOptions}. 
*/ - @Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) + @Beta({Reason.CLIENT, Reason.SERVER}) SearchOptions count(SearchCount option); /** @@ -63,7 +64,7 @@ public interface SearchOptions extends Bson { * @return A new {@link SearchOptions}. * @mongodb.atlas.manual atlas-search/return-stored-source/ Return stored source fields */ - @Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) + @Beta({Reason.CLIENT, Reason.SERVER}) SearchOptions returnStoredSource(boolean returnStoredSource); /** diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java index c620c2995f0..7213f3f894b 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java @@ -17,6 +17,7 @@ import com.mongodb.annotations.Beta; import com.mongodb.annotations.Sealed; +import com.mongodb.annotations.Reason; import com.mongodb.internal.client.model.Util; import org.bson.BsonDocument; import org.bson.BsonString; @@ -37,7 +38,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchPath extends Bson { /** * Returns a {@link SearchPath} for the given {@code path}. diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java index 7c241e8ec06..825264cf7f5 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Projections; import org.bson.BsonDocument; @@ -34,7 +35,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchScore extends Bson { /** * Returns a {@link SearchScore} that instructs to multiply the score by the specified {@code value}. diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java index 442b361d813..268786c3344 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import org.bson.BsonDocument; import org.bson.BsonDouble; @@ -36,7 +37,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface SearchScoreExpression extends Bson { /** * Returns a {@link SearchScoreExpression} that evaluates into the relevance score of a document. 
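The path, score, and options builders above compose the same way after the annotation change. A minimal sketch of a $search stage, assuming the existing text, boost, and index builders; the field name, query string, boost value, and index name are illustrative only:

    import static com.mongodb.client.model.search.SearchOperator.text;
    import static com.mongodb.client.model.search.SearchOptions.searchOptions;
    import static com.mongodb.client.model.search.SearchPath.fieldPath;
    import static com.mongodb.client.model.search.SearchScore.boost;

    import com.mongodb.client.model.Aggregates;
    import org.bson.conversions.Bson;

    public final class ScoredSearchSketch {
        // Builds a $search stage over the "title" field, doubling the relevance
        // score of matches and returning only stored source fields.
        static Bson searchStage() {
            return Aggregates.search(
                    text(fieldPath("title"), "java").score(boost(2.0f)),
                    searchOptions().index("default").returnStoredSource(true));
        }
    }
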
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java index 388a08bcb03..a6bda94e206 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -27,7 +28,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface ShouldCompoundSearchOperator extends CompoundSearchOperator { @Override ShouldCompoundSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java index 523d20bfe98..209eaf9ff47 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java +++ b/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,7 +24,7 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface StringSearchFacet extends SearchFacet { /** * Creates a new {@link StringSearchFacet} that explicitly limits the number of facet categories. diff --git a/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java index 71d1206d2d7..241639f3a47 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java +++ b/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -24,7 +25,7 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface TextSearchOperator extends SearchOperator { @Override TextSearchOperator score(SearchScore modifier); diff --git a/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java index 5df56e6bbbd..2bcbde468f3 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java +++ b/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER}) +@Beta({Reason.CLIENT, Reason.SERVER}) public interface TotalSearchCount extends SearchCount { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java index 5b180b7c14f..d760bd60d52 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java +++ b/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java @@ -16,6 
+16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface ValueBoostSearchScore extends SearchScore { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java index e512ab0a31c..df3607d039b 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; import com.mongodb.client.model.Filters; @@ -30,7 +31,7 @@ * @since 4.11 */ @Sealed -@Beta(Beta.Reason.SERVER) +@Beta(Reason.SERVER) public interface VectorSearchOptions extends Bson { /** * Creates a new {@link VectorSearchOptions} with the filter specified. diff --git a/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java index 9fb66644fbd..2fceaaaad7a 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java +++ b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java @@ -16,6 +16,7 @@ package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; /** @@ -23,6 +24,6 @@ * @since 4.7 */ @Sealed -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) public interface WildcardSearchPath extends SearchPath { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/package-info.java b/driver-core/src/main/com/mongodb/client/model/search/package-info.java index d17cba4139e..c3664cb5560 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/package-info.java +++ b/driver-core/src/main/com/mongodb/client/model/search/package-info.java @@ -31,8 +31,9 @@ * @since 4.7 */ @NonNullApi -@Beta(Beta.Reason.CLIENT) +@Beta(Reason.CLIENT) package com.mongodb.client.model.search; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java index aef24b54765..509e467273b 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.vault; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; @@ -181,7 +182,7 @@ public String getQueryType() { * @mongodb.server.release 6.2 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) { this.rangeOptions = rangeOptions; return this; @@ -195,7 +196,7 @@ public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) { * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption */ @Nullable - 
@Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) public RangeOptions getRangeOptions() { return rangeOptions; } diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java index b763b0bf112..42a6618bcdb 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java @@ -17,6 +17,7 @@ package com.mongodb.client.model.vault; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -33,7 +34,7 @@ * @mongodb.server.release 6.2 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption */ -@Beta(Beta.Reason.SERVER) +@Beta(Reason.SERVER) public class RangeOptions { private BsonValue min; diff --git a/driver-core/src/main/com/mongodb/connection/ServerDescription.java b/driver-core/src/main/com/mongodb/connection/ServerDescription.java index d97e848c163..f3de13006d1 100644 --- a/driver-core/src/main/com/mongodb/connection/ServerDescription.java +++ b/driver-core/src/main/com/mongodb/connection/ServerDescription.java @@ -18,8 +18,10 @@ import com.mongodb.ServerAddress; import com.mongodb.TagSet; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; import com.mongodb.internal.connection.DecimalFormatHelper; import com.mongodb.internal.connection.Time; import com.mongodb.lang.Nullable; @@ -70,6 +72,10 @@ public class ServerDescription { private final ServerAddress address; private final ServerType type; + /** + * Identifies whether the server is a mongocryptd. + */ + private final boolean cryptd; private final String canonicalAddress; private final Set hosts; private final Set passives; @@ -79,6 +85,7 @@ public class ServerDescription { private final TagSet tagSet; private final String setName; private final long roundTripTimeNanos; + private final long minRoundTripTimeNanos; private final boolean ok; private final ServerConnectionState state; @@ -159,6 +166,7 @@ public boolean isHelloOk() { public static class Builder { private ServerAddress address; private ServerType type = UNKNOWN; + private boolean cryptd = false; private String canonicalAddress; private Set hosts = Collections.emptySet(); private Set passives = Collections.emptySet(); @@ -168,6 +176,7 @@ public static class Builder { private TagSet tagSet = new TagSet(); private String setName; private long roundTripTimeNanos; + private long minRoundTripTimeNanos; private boolean ok; private ServerConnectionState state; private int minWireVersion = 0; @@ -188,6 +197,7 @@ public static class Builder { Builder(final ServerDescription serverDescription) { this.address = serverDescription.address; this.type = serverDescription.type; + this.cryptd = serverDescription.cryptd; this.canonicalAddress = serverDescription.canonicalAddress; this.hosts = serverDescription.hosts; this.passives = serverDescription.passives; @@ -245,6 +255,17 @@ public Builder type(final ServerType type) { return this; } + /** + * Sets whether this server is a mongocryptd. + * + * @param cryptd true if this server is a mongocryptd. + * @return this + */ + public Builder cryptd(final boolean cryptd) { + this.cryptd = cryptd; + return this; + } + /** * Sets all members of the replica set that are neither hidden, passive, nor arbiters. 
* @@ -315,7 +336,7 @@ public Builder tagSet(@Nullable final TagSet tagSet) { } /** - * Set the time it took to make the round trip for requesting this information from the server + * Set the weighted average time it took to make the round trip for requesting this information from the server * * @param roundTripTime the time taken * @param timeUnit the units of the time taken @@ -326,6 +347,21 @@ public Builder roundTripTime(final long roundTripTime, final TimeUnit timeUnit) return this; } + + /** + * Set the recent min time it took to make the round trip for requesting this information from the server + * + * @param minRoundTripTime the minimum time taken + * @param timeUnit the units of the time taken + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public Builder minRoundTripTime(final long minRoundTripTime, final TimeUnit timeUnit) { + this.minRoundTripTimeNanos = timeUnit.toNanos(minRoundTripTime); + return this; + } + /** * Sets the name of the replica set * @@ -628,6 +664,15 @@ public boolean isSecondary() { return ok && (type == REPLICA_SET_SECONDARY || type == SHARD_ROUTER || type == STANDALONE || type == LOAD_BALANCER); } + /** + * Returns whether this server is mongocryptd. + * + * @return true if this server is a mongocryptd. + */ + public boolean isCryptd() { + return cryptd; + } + /** * Get a Set of strings in the format of "[hostname]:[port]" that contains all members of the replica set that are neither hidden, * passive, nor arbiters. @@ -824,7 +869,7 @@ public ClusterType getClusterType() { } /** - * Get the time it took to make the round trip for requesting this information from the server in nanoseconds. + * Get the weighted average time it took to make the round trip for requesting this information from the server in nanoseconds. * * @return the time taken to request the information, in nano seconds */ @@ -832,6 +877,17 @@ public long getRoundTripTimeNanos() { return roundTripTimeNanos; } + /** + * Get the recent min time it took to make the round trip for requesting this information from the server in nanoseconds. + * + * @return the recent min time taken to request the information, in nano seconds + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public long getMinRoundTripTimeNanos() { + return minRoundTripTimeNanos; + } + /** * Gets the exception thrown while attempting to determine the server description. This is useful for diagnostic purposed when * determining the root cause of a connectivity failure. @@ -843,12 +899,6 @@ public Throwable getException() { return exception; } - /** - * Returns true if this instance is equals to @code{o}. Note that equality is defined to NOT include the round trip time. - * - * @param o the object to compare to - * @return true if this instance is equals to @code{o} - */ @Override public boolean equals(final Object o) { if (this == o) { @@ -857,7 +907,6 @@ public boolean equals(final Object o) { if (o == null || getClass() != o.getClass()) { return false; } - ServerDescription that = (ServerDescription) o; if (maxDocumentSize != that.maxDocumentSize) { @@ -928,6 +977,10 @@ public boolean equals(final Object o) { return false; } + if (cryptd != that.cryptd) { + return false; + } + // Compare class equality and message as exceptions rarely override equals Class thisExceptionClass = exception != null ? exception.getClass() : null; Class thatExceptionClass = that.exception != null ? 
that.exception.getClass() : null; @@ -946,30 +999,9 @@ public boolean equals(final Object o) { @Override public int hashCode() { - int result = address.hashCode(); - result = 31 * result + type.hashCode(); - result = 31 * result + (canonicalAddress != null ? canonicalAddress.hashCode() : 0); - result = 31 * result + hosts.hashCode(); - result = 31 * result + passives.hashCode(); - result = 31 * result + arbiters.hashCode(); - result = 31 * result + (primary != null ? primary.hashCode() : 0); - result = 31 * result + maxDocumentSize; - result = 31 * result + tagSet.hashCode(); - result = 31 * result + (setName != null ? setName.hashCode() : 0); - result = 31 * result + (electionId != null ? electionId.hashCode() : 0); - result = 31 * result + (setVersion != null ? setVersion.hashCode() : 0); - result = 31 * result + (topologyVersion != null ? topologyVersion.hashCode() : 0); - result = 31 * result + (lastWriteDate != null ? lastWriteDate.hashCode() : 0); - result = 31 * result + (int) (lastUpdateTimeNanos ^ (lastUpdateTimeNanos >>> 32)); - result = 31 * result + (ok ? 1 : 0); - result = 31 * result + state.hashCode(); - result = 31 * result + minWireVersion; - result = 31 * result + maxWireVersion; - result = 31 * result + (logicalSessionTimeoutMinutes != null ? logicalSessionTimeoutMinutes.hashCode() : 0); - result = 31 * result + (helloOk ? 1 : 0); - result = 31 * result + (exception == null ? 0 : exception.getClass().hashCode()); - result = 31 * result + (exception == null ? 0 : exception.getMessage().hashCode()); - return result; + return Objects.hash(address, type, cryptd, canonicalAddress, hosts, passives, arbiters, primary, maxDocumentSize, tagSet, setName, + roundTripTimeNanos, minRoundTripTimeNanos, ok, state, minWireVersion, maxWireVersion, electionId, setVersion, + topologyVersion, lastWriteDate, lastUpdateTimeNanos, logicalSessionTimeoutMinutes, exception, helloOk); } @Override @@ -977,6 +1009,7 @@ public String toString() { return "ServerDescription{" + "address=" + address + ", type=" + type + + ", cryptd=" + cryptd + ", state=" + state + (state == CONNECTED ? @@ -986,6 +1019,7 @@ public String toString() { + ", maxDocumentSize=" + maxDocumentSize + ", logicalSessionTimeoutMinutes=" + logicalSessionTimeoutMinutes + ", roundTripTimeNanos=" + roundTripTimeNanos + + ", minRoundTripTimeNanos=" + minRoundTripTimeNanos : "") + (isReplicaSetMember() ? 
@@ -1047,6 +1081,7 @@ private String getRoundTripFormattedInMilliseconds() { ServerDescription(final Builder builder) { address = notNull("address", builder.address); type = notNull("type", builder.type); + cryptd = builder.cryptd; state = notNull("state", builder.state); canonicalAddress = builder.canonicalAddress; hosts = builder.hosts; @@ -1057,6 +1092,7 @@ private String getRoundTripFormattedInMilliseconds() { tagSet = builder.tagSet; setName = builder.setName; roundTripTimeNanos = builder.roundTripTimeNanos; + minRoundTripTimeNanos = builder.minRoundTripTimeNanos; ok = builder.ok; minWireVersion = builder.minWireVersion; maxWireVersion = builder.maxWireVersion; diff --git a/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java b/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java index 96083f66833..9ccb5ef0c8b 100644 --- a/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java +++ b/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java @@ -17,6 +17,8 @@ package com.mongodb.internal; import com.mongodb.MongoCommandException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonInt32; @@ -35,6 +37,15 @@ *

<p>This class is not part of the public API and may be removed or changed at any time</p>
    */ public final class ExceptionUtils { + + public static boolean isMongoSocketException(final Throwable e) { + return e instanceof MongoSocketException; + } + + public static boolean isOperationTimeoutFromSocketException(final Throwable e) { + return e instanceof MongoOperationTimeoutException && e.getCause() instanceof MongoSocketException; + } + public static final class MongoCommandExceptionUtils { public static int extractErrorCode(final BsonDocument response) { return extractErrorCodeAsBson(response).intValue(); diff --git a/driver-core/src/main/com/mongodb/internal/Locks.java b/driver-core/src/main/com/mongodb/internal/Locks.java index 984de156f27..8e8260f50d3 100644 --- a/driver-core/src/main/com/mongodb/internal/Locks.java +++ b/driver-core/src/main/com/mongodb/internal/Locks.java @@ -17,6 +17,7 @@ package com.mongodb.internal; import com.mongodb.MongoInterruptedException; +import com.mongodb.internal.function.CheckedSupplier; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutContext.java b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java new file mode 100644 index 00000000000..0b4907c2ff1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java @@ -0,0 +1,379 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.time.StartTime; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.session.ClientSession; + +import java.util.Objects; +import java.util.function.LongConsumer; + +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * Timeout Context. + * + *

<p>The context for handling timeouts in relation to the Client Side Operation Timeout specification.</p>
    + */ +public class TimeoutContext { + + private final boolean isMaintenanceContext; + private final TimeoutSettings timeoutSettings; + + @Nullable + private Timeout timeout; + @Nullable + private Timeout computedServerSelectionTimeout; + private long minRoundTripTimeMS = 0; + + @Nullable + private MaxTimeSupplier maxTimeSupplier = null; + + public static MongoOperationTimeoutException createMongoRoundTripTimeoutException() { + return createMongoTimeoutException("Remaining timeoutMS is less than or equal to the server's minimum round trip time."); + } + + public static MongoOperationTimeoutException createMongoTimeoutException(final String message) { + return new MongoOperationTimeoutException(message); + } + + public static T throwMongoTimeoutException(final String message) { + throw new MongoOperationTimeoutException(message); + } + + public static MongoOperationTimeoutException createMongoTimeoutException(final Throwable cause) { + return createMongoTimeoutException("Operation exceeded the timeout limit: " + cause.getMessage(), cause); + } + + public static MongoOperationTimeoutException createMongoTimeoutException(final String message, final Throwable cause) { + if (cause instanceof MongoOperationTimeoutException) { + return (MongoOperationTimeoutException) cause; + } + return new MongoOperationTimeoutException(message, cause); + } + + public static TimeoutContext createMaintenanceTimeoutContext(final TimeoutSettings timeoutSettings) { + return new TimeoutContext(true, timeoutSettings, startTimeout(timeoutSettings.getTimeoutMS())); + } + + public static TimeoutContext createTimeoutContext(final ClientSession session, final TimeoutSettings timeoutSettings) { + TimeoutContext sessionTimeoutContext = session.getTimeoutContext(); + + if (sessionTimeoutContext != null) { + TimeoutSettings sessionTimeoutSettings = sessionTimeoutContext.timeoutSettings; + if (timeoutSettings.getGenerationId() > sessionTimeoutSettings.getGenerationId()) { + throw new MongoClientException("Cannot change the timeoutMS during a transaction."); + } + + // Check for any legacy operation timeouts + if (sessionTimeoutSettings.getTimeoutMS() == null) { + if (timeoutSettings.getMaxTimeMS() != 0) { + sessionTimeoutSettings = sessionTimeoutSettings.withMaxTimeMS(timeoutSettings.getMaxTimeMS()); + } + if (timeoutSettings.getMaxAwaitTimeMS() != 0) { + sessionTimeoutSettings = sessionTimeoutSettings.withMaxAwaitTimeMS(timeoutSettings.getMaxAwaitTimeMS()); + } + if (timeoutSettings.getMaxCommitTimeMS() != null) { + sessionTimeoutSettings = sessionTimeoutSettings.withMaxCommitMS(timeoutSettings.getMaxCommitTimeMS()); + } + return new TimeoutContext(sessionTimeoutSettings); + } + return sessionTimeoutContext; + } + return new TimeoutContext(timeoutSettings); + } + + // Creates a copy of the timeout context that can be reset without resetting the original. 
+ public TimeoutContext copyTimeoutContext() { + return new TimeoutContext(getTimeoutSettings(), getTimeout()); + } + + public TimeoutContext(final TimeoutSettings timeoutSettings) { + this(false, timeoutSettings, startTimeout(timeoutSettings.getTimeoutMS())); + } + + private TimeoutContext(final TimeoutSettings timeoutSettings, @Nullable final Timeout timeout) { + this(false, timeoutSettings, timeout); + } + + private TimeoutContext(final boolean isMaintenanceContext, final TimeoutSettings timeoutSettings, @Nullable final Timeout timeout) { + this.isMaintenanceContext = isMaintenanceContext; + this.timeoutSettings = timeoutSettings; + this.timeout = timeout; + } + + /** + * Allows for the differentiation between users explicitly setting a global operation timeout via {@code timeoutMS}. + * + * @return true if a timeout has been set. + */ + public boolean hasTimeoutMS() { + return timeoutSettings.getTimeoutMS() != null; + } + + /** + * Runs the runnable if the timeout is expired. + * @param onExpired the runnable to run + */ + public void onExpired(final Runnable onExpired) { + Timeout.nullAsInfinite(timeout).onExpired(onExpired); + } + + /** + * Sets the recent min round trip time + * @param minRoundTripTimeMS the min round trip time + * @return this + */ + public TimeoutContext minRoundTripTimeMS(final long minRoundTripTimeMS) { + isTrue("'minRoundTripTimeMS' must be a positive number", minRoundTripTimeMS >= 0); + this.minRoundTripTimeMS = minRoundTripTimeMS; + return this; + } + + @Nullable + public Timeout timeoutIncludingRoundTrip() { + return timeout == null ? null : timeout.shortenBy(minRoundTripTimeMS, MILLISECONDS); + } + + /** + * Returns the remaining {@code timeoutMS} if set or the {@code alternativeTimeoutMS}. + * + * @param alternativeTimeoutMS the alternative timeout. + * @return timeout to use. + */ + public long timeoutOrAlternative(final long alternativeTimeoutMS) { + if (timeout == null) { + return alternativeTimeoutMS; + } else { + return timeout.call(MILLISECONDS, + () -> 0L, + (ms) -> ms, + () -> throwMongoTimeoutException("The operation exceeded the timeout limit.")); + } + } + + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + + public long getMaxAwaitTimeMS() { + return timeoutSettings.getMaxAwaitTimeMS(); + } + + public void runMaxTimeMS(final LongConsumer onRemaining) { + if (maxTimeSupplier != null) { + runWithFixedTimeout(maxTimeSupplier.get(), onRemaining); + return; + } + if (timeout == null) { + runWithFixedTimeout(timeoutSettings.getMaxTimeMS(), onRemaining); + return; + } + timeout.shortenBy(minRoundTripTimeMS, MILLISECONDS) + .run(MILLISECONDS, + () -> {}, + onRemaining, + () -> { + throw createMongoRoundTripTimeoutException(); + }); + + } + + private static void runWithFixedTimeout(final long ms, final LongConsumer onRemaining) { + if (ms != 0) { + onRemaining.accept(ms); + } + } + + public void resetToDefaultMaxTime() { + this.maxTimeSupplier = null; + } + + /** + * The override will be provided as the remaining value in + * {@link #runMaxTimeMS}, where 0 is ignored. + *

    + * NOTE: Suitable for static user-defined values only (i.e MaxAwaitTimeMS), + * not for running timeouts that adjust dynamically. + */ + public void setMaxTimeOverride(final long maxTimeMS) { + this.maxTimeSupplier = () -> maxTimeMS; + } + + /** + * The override will be provided as the remaining value in + * {@link #runMaxTimeMS}, where 0 is ignored. + */ + public void setMaxTimeOverrideToMaxCommitTime() { + this.maxTimeSupplier = () -> getMaxCommitTimeMS(); + } + + @VisibleForTesting(otherwise = PRIVATE) + public long getMaxCommitTimeMS() { + Long maxCommitTimeMS = timeoutSettings.getMaxCommitTimeMS(); + return timeoutOrAlternative(maxCommitTimeMS != null ? maxCommitTimeMS : 0); + } + + public long getReadTimeoutMS() { + return timeoutOrAlternative(timeoutSettings.getReadTimeoutMS()); + } + + public long getWriteTimeoutMS() { + return timeoutOrAlternative(0); + } + + public int getConnectTimeoutMs() { + final long connectTimeoutMS = getTimeoutSettings().getConnectTimeoutMS(); + return Math.toIntExact(Timeout.nullAsInfinite(timeout).call(MILLISECONDS, + () -> connectTimeoutMS, + (ms) -> connectTimeoutMS == 0 ? ms : Math.min(ms, connectTimeoutMS), + () -> throwMongoTimeoutException("The operation exceeded the timeout limit."))); + } + + public void resetTimeoutIfPresent() { + if (hasTimeoutMS()) { + timeout = startTimeout(timeoutSettings.getTimeoutMS()); + } + } + + /** + * Resets the timeout if this timeout context is being used by pool maintenance + */ + public void resetMaintenanceTimeout() { + if (!isMaintenanceContext) { + return; + } + timeout = Timeout.nullAsInfinite(timeout).call(NANOSECONDS, + () -> timeout, + (ms) -> startTimeout(timeoutSettings.getTimeoutMS()), + () -> startTimeout(timeoutSettings.getTimeoutMS())); + } + + public TimeoutContext withAdditionalReadTimeout(final int additionalReadTimeout) { + // Only used outside timeoutMS usage + assertNull(timeout); + + // Check existing read timeout is infinite + if (timeoutSettings.getReadTimeoutMS() == 0) { + return this; + } + + long newReadTimeout = getReadTimeoutMS() + additionalReadTimeout; + return new TimeoutContext(timeoutSettings.withReadTimeoutMS(newReadTimeout > 0 ? newReadTimeout : Long.MAX_VALUE)); + } + + @Override + public String toString() { + return "TimeoutContext{" + + "isMaintenanceContext=" + isMaintenanceContext + + ", timeoutSettings=" + timeoutSettings + + ", timeout=" + timeout + + ", minRoundTripTimeMS=" + minRoundTripTimeMS + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TimeoutContext that = (TimeoutContext) o; + return isMaintenanceContext == that.isMaintenanceContext + && minRoundTripTimeMS == that.minRoundTripTimeMS + && Objects.equals(timeoutSettings, that.timeoutSettings) + && Objects.equals(timeout, that.timeout); + } + + @Override + public int hashCode() { + return Objects.hash(isMaintenanceContext, timeoutSettings, timeout, minRoundTripTimeMS); + } + + @Nullable + public static Timeout startTimeout(@Nullable final Long timeoutMS) { + if (timeoutMS != null) { + return Timeout.expiresIn(timeoutMS, MILLISECONDS, ZERO_DURATION_MEANS_INFINITE); + } + return null; + } + + /** + * Returns the computed server selection timeout + * + *

<p>Caches the computed server selection timeout if:
+ * <ul>
+ *     <li>not in a maintenance context</li>
+ *     <li>there is a timeoutMS, so to keep the same legacy behavior.</li>
+ *     <li>the server selection timeout is less than the remaining overall timeout.</li>
+ * </ul>
    + * + * @return the timeout context + */ + public Timeout computeServerSelectionTimeout() { + Timeout serverSelectionTimeout = StartTime.now() + .timeoutAfterOrInfiniteIfNegative(getTimeoutSettings().getServerSelectionTimeoutMS(), MILLISECONDS); + + + if (isMaintenanceContext || !hasTimeoutMS()) { + return serverSelectionTimeout; + } + + if (timeout != null && Timeout.earliest(serverSelectionTimeout, timeout) == timeout) { + return timeout; + } + + computedServerSelectionTimeout = serverSelectionTimeout; + return computedServerSelectionTimeout; + } + + /** + * Returns the timeout context to use for the handshake process + * + * @return a new timeout context with the cached computed server selection timeout if available or this + */ + public TimeoutContext withComputedServerSelectionTimeoutContext() { + if (this.hasTimeoutMS() && computedServerSelectionTimeout != null) { + return new TimeoutContext(false, timeoutSettings, computedServerSelectionTimeout); + } + return this; + } + + public Timeout startWaitQueueTimeout(final StartTime checkoutStart) { + final long ms = getTimeoutSettings().getMaxWaitTimeMS(); + return checkoutStart.timeoutAfterOrInfiniteIfNegative(ms, MILLISECONDS); + } + + @Nullable + public Timeout getTimeout() { + return timeout; + } + + public interface MaxTimeSupplier { + long get(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java b/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java new file mode 100644 index 00000000000..486a893d74c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java @@ -0,0 +1,265 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.MongoClientSettings; +import com.mongodb.lang.Nullable; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * Timeout Settings. + * + *

<p>Includes all client based timeouts</p>
    + */ +public class TimeoutSettings { + private static final AtomicLong NEXT_ID = new AtomicLong(0); + private final long generationId; + private final long serverSelectionTimeoutMS; + private final long connectTimeoutMS; + @Nullable + private final Long timeoutMS; + + // Deprecated configuration timeout options + private final long readTimeoutMS; // aka socketTimeoutMS + private final long maxWaitTimeMS; // aka waitQueueTimeoutMS + @Nullable + private final Long wTimeoutMS; + + // Deprecated options for CRUD methods + private final long maxTimeMS; + private final long maxAwaitTimeMS; + @Nullable + private final Long maxCommitTimeMS; + + public static final TimeoutSettings DEFAULT = create(MongoClientSettings.builder().build()); + + @Nullable + public static Long convertAndValidateTimeoutNullable(@Nullable final Long timeout, final TimeUnit timeUnit) { + return timeout == null ? null : convertAndValidateTimeout(timeout, timeUnit, "timeout"); + } + + public static long convertAndValidateTimeout(final long timeout, final TimeUnit timeUnit) { + return convertAndValidateTimeout(timeout, timeUnit, "timeout"); + } + + public static long convertAndValidateTimeout(final long timeout, final TimeUnit timeUnit, final String fieldName) { + return isTrueArgument(fieldName + " was too small. After conversion it was rounded to 0 milliseconds, " + + " which would result in an unintended infinite timeout.", + () -> MILLISECONDS.convert(timeout, timeUnit), + (timeoutMS) -> timeout == 0 && timeoutMS == 0 || timeoutMS > 0); + } + + @SuppressWarnings("deprecation") + public static TimeoutSettings create(final MongoClientSettings settings) { + return new TimeoutSettings( + settings.getClusterSettings().getServerSelectionTimeout(TimeUnit.MILLISECONDS), + settings.getSocketSettings().getConnectTimeout(TimeUnit.MILLISECONDS), + settings.getSocketSettings().getReadTimeout(TimeUnit.MILLISECONDS), + settings.getTimeout(TimeUnit.MILLISECONDS), + settings.getConnectionPoolSettings().getMaxWaitTime(TimeUnit.MILLISECONDS)); + } + + public static TimeoutSettings createHeartbeatSettings(final MongoClientSettings settings) { + return new TimeoutSettings( + settings.getClusterSettings().getServerSelectionTimeout(TimeUnit.MILLISECONDS), + settings.getHeartbeatSocketSettings().getConnectTimeout(TimeUnit.MILLISECONDS), + settings.getHeartbeatSocketSettings().getReadTimeout(TimeUnit.MILLISECONDS), + settings.getTimeout(TimeUnit.MILLISECONDS), + settings.getConnectionPoolSettings().getMaxWaitTime(TimeUnit.MILLISECONDS)); + } + + public TimeoutSettings(final long serverSelectionTimeoutMS, final long connectTimeoutMS, final long readTimeoutMS, + @Nullable final Long timeoutMS, final long maxWaitTimeMS) { + this(-1, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, 0, 0, null, null, maxWaitTimeMS); + } + + TimeoutSettings(@Nullable final Long timeoutMS, final long serverSelectionTimeoutMS, final long connectTimeoutMS, + final long readTimeoutMS, final long maxAwaitTimeMS, final long maxTimeMS, @Nullable final Long maxCommitTimeMS, + @Nullable final Long wTimeoutMS, final long maxWaitTimeMS) { + this(timeoutMS != null ? 
NEXT_ID.incrementAndGet() : -1, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, + maxAwaitTimeMS, maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + private TimeoutSettings(final long generationId, @Nullable final Long timeoutMS, final long serverSelectionTimeoutMS, + final long connectTimeoutMS, final long readTimeoutMS, final long maxAwaitTimeMS, final long maxTimeMS, + @Nullable final Long maxCommitTimeMS, @Nullable final Long wTimeoutMS, final long maxWaitTimeMS) { + + isTrueArgument("timeoutMS must be >= 0", timeoutMS == null || timeoutMS >= 0); + isTrueArgument("maxAwaitTimeMS must be >= 0", maxAwaitTimeMS >= 0); + isTrueArgument("maxTimeMS must be >= 0", maxTimeMS >= 0); + isTrueArgument("timeoutMS must be greater than maxAwaitTimeMS", timeoutMS == null || timeoutMS == 0 + || timeoutMS > maxAwaitTimeMS); + isTrueArgument("maxCommitTimeMS must be >= 0", maxCommitTimeMS == null || maxCommitTimeMS >= 0); + + this.generationId = generationId; + this.serverSelectionTimeoutMS = serverSelectionTimeoutMS; + this.connectTimeoutMS = connectTimeoutMS; + this.timeoutMS = timeoutMS; + this.maxAwaitTimeMS = maxAwaitTimeMS; + this.readTimeoutMS = readTimeoutMS; + this.maxTimeMS = maxTimeMS; + this.maxCommitTimeMS = maxCommitTimeMS; + this.wTimeoutMS = wTimeoutMS; + this.maxWaitTimeMS = maxWaitTimeMS; + } + + public TimeoutSettings connectionOnly() { + return new TimeoutSettings(serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, null, maxWaitTimeMS); + } + + public TimeoutSettings withTimeout(@Nullable final Long timeout, final TimeUnit timeUnit) { + return withTimeoutMS(convertAndValidateTimeoutNullable(timeout, timeUnit)); + } + + TimeoutSettings withTimeoutMS(@Nullable final Long timeoutMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxTimeMS(final long maxTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxAwaitTimeMS(final long maxAwaitTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxTimeAndMaxAwaitTimeMS(final long maxTimeMS, final long maxAwaitTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxCommitMS(@Nullable final Long maxCommitTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withWTimeoutMS(@Nullable final Long wTimeoutMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withReadTimeoutMS(final long readTimeoutMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + 
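Each with* method above copies the settings into a new immutable instance rather than mutating the receiver. A minimal sketch of how this internal class is used (the millisecond values are illustrative, and this is an internal API not intended for application code):

    import com.mongodb.internal.TimeoutSettings;

    public final class TimeoutSettingsSketch {
        static TimeoutSettings derived() {
            TimeoutSettings base = new TimeoutSettings(
                    30_000,  // serverSelectionTimeoutMS
                    10_000,  // connectTimeoutMS
                    0,       // readTimeoutMS (0 means no socket read timeout)
                    5_000L,  // timeoutMS
                    120_000  // maxWaitTimeMS
            );
            // Each call returns a fresh instance; `base` is left untouched.
            return base.withMaxTimeMS(2_000).withMaxAwaitTimeMS(1_000);
        }
    }
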
public TimeoutSettings withServerSelectionTimeoutMS(final long serverSelectionTimeoutMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxWaitTimeMS(final long maxWaitTimeMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public long getServerSelectionTimeoutMS() { + return serverSelectionTimeoutMS; + } + + public long getConnectTimeoutMS() { + return connectTimeoutMS; + } + + @Nullable + public Long getTimeoutMS() { + return timeoutMS; + } + + public long getMaxAwaitTimeMS() { + return maxAwaitTimeMS; + } + + public long getReadTimeoutMS() { + return readTimeoutMS; + } + + public long getMaxTimeMS() { + return maxTimeMS; + } + + @Nullable + public Long getWTimeoutMS() { + return wTimeoutMS; + } + + public long getMaxWaitTimeMS() { + return maxWaitTimeMS; + } + + @Nullable + public Long getMaxCommitTimeMS() { + return maxCommitTimeMS; + } + + /** + * The generation id represents a creation counter for {@code TimeoutSettings} that contain a {@code timeoutMS} value. + * + *

<p>This is used to determine if a new set of {@code TimeoutSettings} has been created within a {@code withTransaction}
+     * block, so that a client side error can be issued.</p>

    + * + * @return the generation id or -1 if no timeout MS is set. + */ + public long getGenerationId() { + return generationId; + } + + @Override + public String toString() { + return "TimeoutSettings{" + + "generationId=" + generationId + + ", timeoutMS=" + timeoutMS + + ", serverSelectionTimeoutMS=" + serverSelectionTimeoutMS + + ", connectTimeoutMS=" + connectTimeoutMS + + ", readTimeoutMS=" + readTimeoutMS + + ", maxWaitTimeMS=" + maxWaitTimeMS + + ", wTimeoutMS=" + wTimeoutMS + + ", maxTimeMS=" + maxTimeMS + + ", maxAwaitTimeMS=" + maxAwaitTimeMS + + ", maxCommitTimeMS=" + maxCommitTimeMS + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TimeoutSettings that = (TimeoutSettings) o; + return serverSelectionTimeoutMS == that.serverSelectionTimeoutMS && connectTimeoutMS == that.connectTimeoutMS + && readTimeoutMS == that.readTimeoutMS && maxWaitTimeMS == that.maxWaitTimeMS && maxTimeMS == that.maxTimeMS + && maxAwaitTimeMS == that.maxAwaitTimeMS && Objects.equals(timeoutMS, that.timeoutMS) + && Objects.equals(wTimeoutMS, that.wTimeoutMS) && Objects.equals(maxCommitTimeMS, that.maxCommitTimeMS); + } + + @Override + public int hashCode() { + return Objects.hash(generationId, serverSelectionTimeoutMS, connectTimeoutMS, timeoutMS, readTimeoutMS, maxWaitTimeMS, wTimeoutMS, maxTimeMS, + maxAwaitTimeMS, maxCommitTimeMS); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java index 33e1af001bb..a81b2fdd12c 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java @@ -16,6 +16,7 @@ package com.mongodb.internal.async; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.function.RetryState; import com.mongodb.internal.async.function.RetryingAsyncCallbackSupplier; @@ -267,10 +268,10 @@ default AsyncSupplier thenSupply(final AsyncSupplier supplier) { * @see RetryingAsyncCallbackSupplier */ default AsyncRunnable thenRunRetryingWhile( - final AsyncRunnable runnable, final Predicate shouldRetry) { + final TimeoutContext timeoutContext, final AsyncRunnable runnable, final Predicate shouldRetry) { return thenRun(callback -> { new RetryingAsyncCallbackSupplier( - new RetryState(), + new RetryState(timeoutContext), (rs, lastAttemptFailure) -> shouldRetry.test(lastAttemptFailure), // `finish` is required here instead of `unsafeFinish` // because only `finish` meets the contract of diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java index 89329f16a24..e1cecf721fc 100644 --- a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java @@ -15,7 +15,9 @@ */ package com.mongodb.internal.async.function; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.LoopState.AttachmentKey; import com.mongodb.lang.NonNull; @@ -29,6 +31,7 @@ import static com.mongodb.assertions.Assertions.assertFalse; import static com.mongodb.assertions.Assertions.assertNotNull; import static 
com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; /** * Represents both the state associated with a retryable activity and a handle that can be used to affect retrying, e.g., @@ -48,25 +51,62 @@ public final class RetryState { private final LoopState loopState; private final int attempts; + private final boolean retryUntilTimeoutThrowsException; @Nullable - private Throwable exception; + private Throwable previouslyChosenException; /** - * @param retries A non-negative number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited. + * Creates a {@code RetryState} with a positive number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as + * being unlimited. + *

<p>
+     * If a timeout is not specified via {@link TimeoutContext#hasTimeoutMS()}, the specified {@code retries} param acts as a fallback
+     * bound. Otherwise, retries are unbounded until the timeout is reached.
+     * </p>
+     * <p>
+     * It is possible to provide an additional {@code retryPredicate} in the {@link #doAdvanceOrThrow} method,
+     * which can be used to stop retrying based on a custom condition in addition to {@code retries} and {@link TimeoutContext}.
+     * </p>
    + * + * @param retries A positive number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited. + * @param timeoutContext A timeout context that will be used to determine if the operation has timed out. * @see #attempts() */ - public RetryState(final int retries) { - assertTrue(retries >= 0); - loopState = new LoopState(); - attempts = retries == INFINITE_ATTEMPTS ? INFINITE_ATTEMPTS : retries + 1; + public static RetryState withRetryableState(final int retries, final TimeoutContext timeoutContext) { + assertTrue(retries > 0); + if (timeoutContext.hasTimeoutMS()){ + return new RetryState(INFINITE_ATTEMPTS, timeoutContext); + } + return new RetryState(retries, null); + } + + public static RetryState withNonRetryableState() { + return new RetryState(0, null); } /** * Creates a {@link RetryState} that does not limit the number of retries. + * The number of attempts is limited iff {@link TimeoutContext#hasTimeoutMS()} is true and timeout has expired. + *

<p>
+     * It is possible to provide an additional {@code retryPredicate} in the {@link #doAdvanceOrThrow} method,
+     * which can be used to stop retrying based on a custom condition in addition to {@code retries} and {@link TimeoutContext}.
+     * </p>

    + * + * @param timeoutContext A timeout context that will be used to determine if the operation has timed out. + * @see #attempts() + */ + public RetryState(final TimeoutContext timeoutContext) { + this(INFINITE_ATTEMPTS, timeoutContext); + } + + /** + * @param retries A non-negative number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited. + * @param timeoutContext A timeout context that will be used to determine if the operation has timed out. * @see #attempts() */ - public RetryState() { - this(INFINITE_ATTEMPTS); + private RetryState(final int retries, @Nullable final TimeoutContext timeoutContext) { + assertTrue(retries >= 0); + loopState = new LoopState(); + attempts = retries == INFINITE_ATTEMPTS ? INFINITE_ATTEMPTS : retries + 1; + this.retryUntilTimeoutThrowsException = timeoutContext != null && timeoutContext.hasTimeoutMS(); } /** @@ -136,7 +176,7 @@ void advanceOrThrow(final Throwable attemptException, final BinaryOperator predicate) throws RuntimeException { assertFalse(loopState.isLastIteration()); if (!isFirstAttempt()) { - assertNotNull(exception); - assertTrue(exception instanceof RuntimeException); - RuntimeException localException = (RuntimeException) exception; + assertNotNull(previouslyChosenException); + assertTrue(previouslyChosenException instanceof RuntimeException); + RuntimeException localException = (RuntimeException) previouslyChosenException; try { if (predicate.get()) { loopState.markAsLastIteration(); @@ -310,14 +367,23 @@ public boolean isFirstAttempt() { /** * Returns {@code true} iff the current attempt is known to be the last one, i.e., it is known that no more retries will be made. - * An attempt is known to be the last one either because the number of {@linkplain #attempts() attempts} is limited and the current - * attempt is the last one, or because {@link #breakAndThrowIfRetryAnd(Supplier)} / - * {@link #breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)} / {@link #markAsLastAttempt()} was called. + * An attempt is known to be the last one iff any of the following applies: + *
<ul>
+     *     <li>{@link #breakAndThrowIfRetryAnd(Supplier)} / {@link #breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)} / {@link #markAsLastAttempt()} was called.</li>
+     *     <li>A timeout is set and has been reached.</li>
+     *     <li>No timeout is set, and the number of {@linkplain #attempts() attempts} is limited, and the current attempt is the last one.</li>
+     * </ul>
    * * @see #attempts() */ public boolean isLastAttempt() { - return attempt() == attempts - 1 || loopState.isLastIteration(); + if (loopState.isLastIteration()){ + return true; + } + if (retryUntilTimeoutThrowsException) { + return false; + } + return attempt() == attempts - 1; } /** @@ -332,9 +398,9 @@ public int attempt() { /** * Returns a positive maximum number of attempts: *
<ul>
-     *     <li>0 if the number of retries is {@linkplain #RetryState() unlimited};</li>
+     *     <li>0 if the number of retries is {@linkplain #RetryState(TimeoutContext) unlimited};</li>
      *     <li>1 if no retries are allowed;</li>
-     *     <li>{@link #RetryState(int) retries} + 1 otherwise.</li>
+     *     <li>{@link #RetryState(int, TimeoutContext) retries} + 1 otherwise.</li>
      * </ul>
    * * @see #attempt() @@ -353,8 +419,8 @@ public int attempts() { * In synchronous code the returned exception is of the type {@link RuntimeException}. */ public Optional exception() { - assertTrue(exception == null || !isFirstAttempt()); - return Optional.ofNullable(exception); + assertTrue(previouslyChosenException == null || !isFirstAttempt()); + return Optional.ofNullable(previouslyChosenException); } /** @@ -377,7 +443,7 @@ public String toString() { return "RetryState{" + "loopState=" + loopState + ", attempts=" + (attempts == INFINITE_ATTEMPTS ? "infinite" : attempts) - + ", exception=" + exception + + ", exception=" + previouslyChosenException + '}'; } } diff --git a/driver-core/src/main/com/mongodb/internal/async/package-info.java b/driver-core/src/main/com/mongodb/internal/async/package-info.java index f6f0693821d..39b952eead1 100644 --- a/driver-core/src/main/com/mongodb/internal/async/package-info.java +++ b/driver-core/src/main/com/mongodb/internal/async/package-info.java @@ -15,7 +15,6 @@ */ /** - * This package contains cluster and connection event related classes */ @NonNullApi diff --git a/driver-core/src/main/com/mongodb/internal/authentication/package-info.java b/driver-core/src/main/com/mongodb/internal/authentication/package-info.java index 7a697f21ace..bbeb09628af 100644 --- a/driver-core/src/main/com/mongodb/internal/authentication/package-info.java +++ b/driver-core/src/main/com/mongodb/internal/authentication/package-info.java @@ -15,7 +15,6 @@ */ /** - * This package contains cluster and connection event related classes */ @NonNullApi diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java index acf75a3b1e8..fd46261a6df 100644 --- a/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java @@ -18,26 +18,22 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; import com.mongodb.ServerAddress; -import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; import com.mongodb.internal.connection.Server; import com.mongodb.internal.selector.ReadPreferenceServerSelector; import com.mongodb.internal.selector.ReadPreferenceWithFallbackServerSelector; import com.mongodb.internal.selector.ServerAddressSelector; import com.mongodb.internal.selector.WritableServerSelector; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import com.mongodb.selector.ServerSelector; import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.NANOSECONDS; /** * A simple ReadWriteBinding implementation that supplies write connection sources bound to a possibly different primary each time, and a @@ -49,9 +45,6 @@ public class AsyncClusterBinding extends AbstractReferenceCounted implements Asy private final Cluster cluster; private final ReadPreference readPreference; private final ReadConcern readConcern; - @Nullable - private final ServerApi serverApi; - private final RequestContext requestContext; 
private final OperationContext operationContext; /** @@ -60,18 +53,15 @@ public class AsyncClusterBinding extends AbstractReferenceCounted implements Asy * @param cluster a non-null Cluster which will be used to select a server to bind to * @param readPreference a non-null ReadPreference for read operations * @param readConcern a non-null read concern - * @param serverApi a server API, which may be null - * @param requestContext the request context + * @param operationContext the operation context *

<p>This class is not part of the public API and may be removed or changed at any time</p>

    */ public AsyncClusterBinding(final Cluster cluster, final ReadPreference readPreference, final ReadConcern readConcern, - @Nullable final ServerApi serverApi, final RequestContext requestContext) { + final OperationContext operationContext) { this.cluster = notNull("cluster", cluster); this.readPreference = notNull("readPreference", readPreference); - this.readConcern = (notNull("readConcern", readConcern)); - this.serverApi = serverApi; - this.requestContext = notNull("requestContext", requestContext); - operationContext = new OperationContext(); + this.readConcern = notNull("readConcern", readConcern); + this.operationContext = notNull("operationContext", operationContext); } @Override @@ -85,22 +75,6 @@ public ReadPreference getReadPreference() { return readPreference; } - @Override - public SessionContext getSessionContext() { - return new ReadConcernAwareNoOpSessionContext(readConcern); - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - @Override public OperationContext getOperationContext() { return operationContext; @@ -163,6 +137,7 @@ private AsyncClusterBindingConnectionSource(final Server server, final ServerDes this.server = server; this.serverDescription = serverDescription; this.appliedReadPreference = appliedReadPreference; + operationContext.getTimeoutContext().minRoundTripTimeMS(NANOSECONDS.toMillis(serverDescription.getMinRoundTripTimeNanos())); AsyncClusterBinding.this.retain(); } @@ -171,22 +146,6 @@ public ServerDescription getServerDescription() { return serverDescription; } - @Override - public SessionContext getSessionContext() { - return new ReadConcernAwareNoOpSessionContext(readConcern); - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - @Override public OperationContext getOperationContext() { return operationContext; diff --git a/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java b/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java index c98e88232ba..c10f0fb16ac 100644 --- a/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java +++ b/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java @@ -16,23 +16,18 @@ package com.mongodb.internal.binding; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>

    */ public interface BindingContext { - SessionContext getSessionContext(); - - @Nullable - ServerApi getServerApi(); - - RequestContext getRequestContext(); + /** + * Note: Will return the same operation context if called multiple times. + * + * @return the operation context for the binding context. + */ OperationContext getOperationContext(); } diff --git a/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java b/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java index a2223d02014..cd3f8473bbb 100644 --- a/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java +++ b/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java @@ -18,25 +18,21 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; import com.mongodb.ServerAddress; -import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; import com.mongodb.internal.connection.Server; import com.mongodb.internal.connection.ServerTuple; import com.mongodb.internal.selector.ReadPreferenceServerSelector; import com.mongodb.internal.selector.ReadPreferenceWithFallbackServerSelector; import com.mongodb.internal.selector.ServerAddressSelector; import com.mongodb.internal.selector.WritableServerSelector; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.NANOSECONDS; /** * A simple ReadWriteBinding implementation that supplies write connection sources bound to a possibly different primary each time, and a @@ -48,27 +44,21 @@ public class ClusterBinding extends AbstractReferenceCounted implements ClusterA private final Cluster cluster; private final ReadPreference readPreference; private final ReadConcern readConcern; - @Nullable - private final ServerApi serverApi; - private final RequestContext requestContext; private final OperationContext operationContext; /** * Creates an instance. 
- * @param cluster a non-null Cluster which will be used to select a server to bind to - * @param readPreference a non-null ReadPreference for read operations - * @param readConcern a non-null read concern - * @param serverApi a server API, which may be null - * @param requestContext the request context + * @param cluster a non-null Cluster which will be used to select a server to bind to + * @param readPreference a non-null ReadPreference for read operations + * @param readConcern a non-null read concern + * @param operationContext the operation context */ public ClusterBinding(final Cluster cluster, final ReadPreference readPreference, final ReadConcern readConcern, - @Nullable final ServerApi serverApi, final RequestContext requestContext) { + final OperationContext operationContext) { this.cluster = notNull("cluster", cluster); this.readPreference = notNull("readPreference", readPreference); this.readConcern = notNull("readConcern", readConcern); - this.serverApi = serverApi; - this.requestContext = notNull("requestContext", requestContext); - operationContext = new OperationContext(); + this.operationContext = notNull("operationContext", operationContext); } @Override @@ -82,22 +72,6 @@ public ReadPreference getReadPreference() { return readPreference; } - @Override - public SessionContext getSessionContext() { - return new ReadConcernAwareNoOpSessionContext(readConcern); - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - @Override public OperationContext getOperationContext() { return operationContext; @@ -140,6 +114,7 @@ private ClusterBindingConnectionSource(final ServerTuple serverTuple, final Read this.server = serverTuple.getServer(); this.serverDescription = serverTuple.getServerDescription(); this.appliedReadPreference = appliedReadPreference; + operationContext.getTimeoutContext().minRoundTripTimeMS(NANOSECONDS.toMillis(serverDescription.getMinRoundTripTimeNanos())); ClusterBinding.this.retain(); } @@ -148,26 +123,11 @@ public ServerDescription getServerDescription() { return serverDescription; } - @Override - public SessionContext getSessionContext() { - return new ReadConcernAwareNoOpSessionContext(readConcern); - } - @Override public OperationContext getOperationContext() { return operationContext; } - @Override - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - @Override public ReadPreference getReadPreference() { return appliedReadPreference; diff --git a/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java b/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java index 47bb2be22fb..7d7e948c344 100644 --- a/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java +++ b/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java @@ -17,18 +17,13 @@ package com.mongodb.internal.binding; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; import com.mongodb.ServerAddress; -import com.mongodb.ServerApi; -import com.mongodb.internal.connection.OperationContext; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.connection.NoOpSessionContext; +import com.mongodb.internal.connection.OperationContext; import 
com.mongodb.internal.connection.ServerTuple; import com.mongodb.internal.selector.ServerAddressSelector; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import static com.mongodb.assertions.Assertions.notNull; @@ -40,25 +35,18 @@ public class SingleServerBinding extends AbstractReferenceCounted implements ReadWriteBinding { private final Cluster cluster; private final ServerAddress serverAddress; - @Nullable - private final ServerApi serverApi; - private final RequestContext requestContext; private final OperationContext operationContext; /** * Creates an instance, defaulting to {@link com.mongodb.ReadPreference#primary()} for reads. * @param cluster a non-null Cluster which will be used to select a server to bind to * @param serverAddress a non-null address of the server to bind to - * @param serverApi the server API, which may be null - * @param requestContext the request context, which may not be null + * @param operationContext the operation context */ - public SingleServerBinding(final Cluster cluster, final ServerAddress serverAddress, @Nullable final ServerApi serverApi, - final RequestContext requestContext) { + public SingleServerBinding(final Cluster cluster, final ServerAddress serverAddress, final OperationContext operationContext) { this.cluster = notNull("cluster", cluster); this.serverAddress = notNull("serverAddress", serverAddress); - this.serverApi = serverApi; - this.requestContext = notNull("requestContext", requestContext); - operationContext = new OperationContext(); + this.operationContext = notNull("operationContext", operationContext); } @Override @@ -81,22 +69,6 @@ public ConnectionSource getReadConnectionSource(final int minWireVersion, final throw new UnsupportedOperationException(); } - @Override - public SessionContext getSessionContext() { - return NoOpSessionContext.INSTANCE; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - @Override public OperationContext getOperationContext() { return operationContext; @@ -122,26 +94,11 @@ public ServerDescription getServerDescription() { return serverDescription; } - @Override - public SessionContext getSessionContext() { - return NoOpSessionContext.INSTANCE; - } - @Override public OperationContext getOperationContext() { return operationContext; } - @Override - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - @Override public ReadPreference getReadPreference() { return ReadPreference.primary(); @@ -149,8 +106,10 @@ public ReadPreference getReadPreference() { @Override public Connection getConnection() { - return cluster.selectServer(new ServerAddressSelector(serverAddress), operationContext) - .getServer().getConnection(operationContext); + return cluster + .selectServer(new ServerAddressSelector(serverAddress), operationContext) + .getServer() + .getConnection(operationContext); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/binding/StaticBindingContext.java b/driver-core/src/main/com/mongodb/internal/binding/StaticBindingContext.java deleted file mode 100644 index e0e7f40ade0..00000000000 --- a/driver-core/src/main/com/mongodb/internal/binding/StaticBindingContext.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.binding; - -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; -import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; - -/** - * - *

<p>This class is not part of the public API and may be removed or changed at any time</p>

    - */ -public class StaticBindingContext implements BindingContext { - private final SessionContext sessionContext; - private final ServerApi serverApi; - private final RequestContext requestContext; - private final OperationContext operationContext; - - public StaticBindingContext(final SessionContext sessionContext, @Nullable final ServerApi serverApi, - final RequestContext requestContext, final OperationContext operationContext) { - this.sessionContext = sessionContext; - this.serverApi = serverApi; - this.requestContext = requestContext; - this.operationContext = operationContext; - } - - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - - @Nullable - @Override - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return requestContext; - } - - @Override - public OperationContext getOperationContext() { - return operationContext; - } -} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java index 3a87434e9ed..1c7f3ef9858 100644 --- a/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java +++ b/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java @@ -17,6 +17,9 @@ package com.mongodb.internal.client.model; import com.mongodb.CursorType; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonString; @@ -54,6 +57,7 @@ public final class FindOptions { private boolean returnKey; private boolean showRecordId; private Boolean allowDiskUse; + private TimeoutMode timeoutMode; /** * Construct a new instance. @@ -66,7 +70,8 @@ public FindOptions() { final int batchSize, final int limit, final Bson projection, final long maxTimeMS, final long maxAwaitTimeMS, final int skip, final Bson sort, final CursorType cursorType, final boolean noCursorTimeout, final boolean partial, final Collation collation, final BsonValue comment, final Bson hint, final String hintString, final Bson variables, - final Bson max, final Bson min, final boolean returnKey, final boolean showRecordId, final Boolean allowDiskUse) { + final Bson max, final Bson min, final boolean returnKey, final boolean showRecordId, final Boolean allowDiskUse, + final TimeoutMode timeoutMode) { this.batchSize = batchSize; this.limit = limit; this.projection = projection; @@ -87,12 +92,13 @@ public FindOptions() { this.returnKey = returnKey; this.showRecordId = showRecordId; this.allowDiskUse = allowDiskUse; + this.timeoutMode = timeoutMode; } //CHECKSTYLE:ON public FindOptions withBatchSize(final int batchSize) { return new FindOptions(batchSize, limit, projection, maxTimeMS, maxAwaitTimeMS, skip, sort, cursorType, noCursorTimeout, - partial, collation, comment, hint, hintString, variables, max, min, returnKey, showRecordId, allowDiskUse); + partial, collation, comment, hint, hintString, variables, max, min, returnKey, showRecordId, allowDiskUse, timeoutMode); } /** @@ -224,6 +230,41 @@ public FindOptions batchSize(final int batchSize) { return this; } + /** + * Sets the timeoutMode for the cursor. + * + *

<p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@code MongoDatabase} or via {@code MongoCollection}
+     * </p>
+     * <p>
+     * If the {@code timeout} is set then:
+     * <ul>
+     *     <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *     to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
+     * </p>
    + *

    + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public FindOptions timeoutMode(final TimeoutMode timeoutMode) { + this.timeoutMode = timeoutMode; + return this; + } + + /** + * @see #timeoutMode(TimeoutMode) + * @return timeout mode + */ + @Alpha(Reason.CLIENT) + @Nullable + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + /** * Gets a document describing the fields to return for all matching documents. * diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java index e1d7d6946cb..137a2f266e3 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java @@ -24,8 +24,10 @@ import com.mongodb.connection.ClusterType; import com.mongodb.connection.ServerDescription; import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.types.ObjectId; @@ -125,7 +127,8 @@ public void close() { } @Override - public ServersSnapshot getServersSnapshot() { + public ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { isTrue("is open", !isClosed()); Map nonAtomicSnapshot = new HashMap<>(addressToServerTupleMap); return serverAddress -> { diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java new file mode 100644 index 00000000000..ba200933860 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.session.SessionContext; + +import static com.mongodb.internal.ExceptionUtils.isMongoSocketException; +import static com.mongodb.internal.ExceptionUtils.isOperationTimeoutFromSocketException; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

    + */ +public abstract class AbstractProtocolExecutor implements ProtocolExecutor { + + protected boolean shouldMarkSessionDirty(final Throwable e, final SessionContext sessionContext) { + if (!sessionContext.hasSession()) { + return false; + } + return isMongoSocketException(e) || isOperationTimeoutFromSocketException(e); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java index 0ba1985b4b0..2891bc28732 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java @@ -20,7 +20,6 @@ import com.mongodb.annotations.ThreadSafe; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -46,12 +45,12 @@ public interface AsyncConnection extends ReferenceCounted { ConnectionDescription getDescription(); void commandAsync(String database, BsonDocument command, FieldNameValidator fieldNameValidator, - @Nullable ReadPreference readPreference, Decoder commandResultDecoder, BindingContext context, + @Nullable ReadPreference readPreference, Decoder commandResultDecoder, OperationContext operationContext, SingleResultCallback callback); void commandAsync(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator, @Nullable ReadPreference readPreference, Decoder commandResultDecoder, - BindingContext context, boolean responseExpected, @Nullable SplittablePayload payload, + OperationContext operationContext, boolean responseExpected, @Nullable SplittablePayload payload, @Nullable FieldNameValidator payloadFieldNameValidator, SingleResultCallback callback); void markAsPinned(Connection.PinningMode pinningMode); diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java index 6f2b7e5c172..bbb18497ee4 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java @@ -20,6 +20,7 @@ import com.mongodb.MongoInternalException; import com.mongodb.MongoSocketReadException; import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoSocketWriteTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.connection.AsyncCompletionHandler; import com.mongodb.connection.SocketSettings; @@ -86,14 +87,15 @@ protected void setChannel(final ExtendedAsynchronousByteChannel channel) { } @Override - public void writeAsync(final List buffers, final AsyncCompletionHandler handler) { + public void writeAsync(final List buffers, final OperationContext operationContext, + final AsyncCompletionHandler handler) { AsyncWritableByteChannelAdapter byteChannel = new AsyncWritableByteChannelAdapter(); Iterator iter = buffers.iterator(); - pipeOneBuffer(byteChannel, iter.next(), new AsyncCompletionHandler() { + pipeOneBuffer(byteChannel, iter.next(), operationContext, new AsyncCompletionHandler() { @Override public void completed(@Nullable final Void t) { if (iter.hasNext()) { - pipeOneBuffer(byteChannel, iter.next(), this); + pipeOneBuffer(byteChannel, iter.next(), operationContext, this); } else { 
handler.completed(null); } @@ -107,46 +109,31 @@ public void failed(final Throwable t) { } @Override - public void readAsync(final int numBytes, final AsyncCompletionHandler handler) { - readAsync(numBytes, 0, handler); - } - - private void readAsync(final int numBytes, final int additionalTimeout, final AsyncCompletionHandler handler) { + public void readAsync(final int numBytes, final OperationContext operationContext, final AsyncCompletionHandler handler) { ByteBuf buffer = bufferProvider.getBuffer(numBytes); - int timeout = settings.getReadTimeout(MILLISECONDS); - if (timeout > 0 && additionalTimeout > 0) { - timeout += additionalTimeout; - } - - getChannel().read(buffer.asNIO(), timeout, MILLISECONDS, null, new BasicCompletionHandler(buffer, handler)); + long timeout = operationContext.getTimeoutContext().getReadTimeoutMS(); + getChannel().read(buffer.asNIO(), timeout, MILLISECONDS, null, new BasicCompletionHandler(buffer, operationContext, handler)); } @Override - public void open() throws IOException { + public void open(final OperationContext operationContext) throws IOException { FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); - openAsync(handler); + openAsync(operationContext, handler); handler.getOpen(); } @Override - public void write(final List buffers) throws IOException { + public void write(final List buffers, final OperationContext operationContext) throws IOException { FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); - writeAsync(buffers, handler); + writeAsync(buffers, operationContext, handler); handler.getWrite(); } @Override - public ByteBuf read(final int numBytes) throws IOException { + public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException { FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); - readAsync(numBytes, handler); - return handler.getRead(); - } - - @Override - public ByteBuf read(final int numBytes, final int additionalTimeout) throws IOException { - FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); - readAsync(numBytes, additionalTimeout, handler); + readAsync(numBytes, operationContext, handler); return handler.getRead(); } @@ -182,12 +169,12 @@ public ByteBuf getBuffer(final int size) { } private void pipeOneBuffer(final AsyncWritableByteChannelAdapter byteChannel, final ByteBuf byteBuffer, - final AsyncCompletionHandler outerHandler) { - byteChannel.write(byteBuffer.asNIO(), new AsyncCompletionHandler() { + final OperationContext operationContext, final AsyncCompletionHandler outerHandler) { + byteChannel.write(byteBuffer.asNIO(), operationContext, new AsyncCompletionHandler() { @Override public void completed(@Nullable final Void t) { if (byteBuffer.hasRemaining()) { - byteChannel.write(byteBuffer.asNIO(), this); + byteChannel.write(byteBuffer.asNIO(), operationContext, this); } else { outerHandler.completed(null); } @@ -201,8 +188,9 @@ public void failed(final Throwable t) { } private class AsyncWritableByteChannelAdapter { - void write(final ByteBuffer src, final AsyncCompletionHandler handler) { - getChannel().write(src, null, new AsyncWritableByteChannelAdapter.WriteCompletionHandler(handler)); + void write(final ByteBuffer src, final OperationContext operationContext, final AsyncCompletionHandler handler) { + getChannel().write(src, operationContext.getTimeoutContext().getWriteTimeoutMS(), MILLISECONDS, null, + new AsyncWritableByteChannelAdapter.WriteCompletionHandler(handler)); } private 
class WriteCompletionHandler extends BaseCompletionHandler { @@ -218,19 +206,26 @@ public void completed(final Integer result, final Object attachment) { } @Override - public void failed(final Throwable exc, final Object attachment) { + public void failed(final Throwable t, final Object attachment) { AsyncCompletionHandler localHandler = getHandlerAndClear(); - localHandler.failed(exc); + if (t instanceof InterruptedByTimeoutException) { + localHandler.failed(new MongoSocketWriteTimeoutException("Timeout while writing message", serverAddress, t)); + } else { + localHandler.failed(t); + } } } } private final class BasicCompletionHandler extends BaseCompletionHandler { private final AtomicReference byteBufReference; + private final OperationContext operationContext; - private BasicCompletionHandler(final ByteBuf dst, final AsyncCompletionHandler handler) { + private BasicCompletionHandler(final ByteBuf dst, final OperationContext operationContext, + final AsyncCompletionHandler handler) { super(handler); this.byteBufReference = new AtomicReference<>(dst); + this.operationContext = operationContext; } @Override @@ -244,8 +239,8 @@ public void completed(final Integer result, final Void attachment) { localByteBuf.flip(); localHandler.completed(localByteBuf); } else { - getChannel().read(localByteBuf.asNIO(), settings.getReadTimeout(MILLISECONDS), MILLISECONDS, null, - new BasicCompletionHandler(localByteBuf, localHandler)); + getChannel().read(localByteBuf.asNIO(), operationContext.getTimeoutContext().getReadTimeoutMS(), MILLISECONDS, null, + new BasicCompletionHandler(localByteBuf, operationContext, localHandler)); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java index cb1e2a54868..4818b1f7ac4 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java @@ -56,7 +56,7 @@ public AsynchronousSocketChannelStream(final ServerAddress serverAddress, final } @Override - public void openAsync(final AsyncCompletionHandler handler) { + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { isTrue("unopened", getChannel() == null); Queue socketAddressQueue; diff --git a/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java b/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java index 232eeb45049..cd1809966b0 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java @@ -96,19 +96,20 @@ T getNonNullMechanismProperty(final String key, @Nullable final T defaultVal } - abstract void authenticate(InternalConnection connection, ConnectionDescription connectionDescription); + abstract void authenticate(InternalConnection connection, ConnectionDescription connectionDescription, + OperationContext operationContext); abstract void authenticateAsync(InternalConnection connection, ConnectionDescription connectionDescription, - SingleResultCallback callback); + OperationContext operationContext, SingleResultCallback callback); - public void reauthenticate(final InternalConnection connection) { - authenticate(connection, connection.getDescription()); + public void reauthenticate(final InternalConnection connection, final OperationContext 
operationContext) { + authenticate(connection, connection.getDescription(), operationContext); } - public void reauthenticateAsync(final InternalConnection connection, final SingleResultCallback callback) { + public void reauthenticateAsync(final InternalConnection connection, final OperationContext operationContext, + final SingleResultCallback callback) { beginAsync().thenRun((c) -> { - authenticateAsync(connection, connection.getDescription(), c); + authenticateAsync(connection, connection.getDescription(), operationContext, c); }).finish(callback); } - } diff --git a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java index 292822244b7..df3e4d1c1fe 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java @@ -19,6 +19,8 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoException; import com.mongodb.MongoIncompatibleDriverException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.UnixServerAddress; @@ -31,6 +33,7 @@ import com.mongodb.event.ClusterDescriptionChangedEvent; import com.mongodb.event.ClusterListener; import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.connection.OperationContext.ServerDeprioritization; @@ -42,6 +45,7 @@ import com.mongodb.internal.selector.AtMostTwoRandomServerSelector; import com.mongodb.internal.selector.LatencyMinimizingServerSelector; import com.mongodb.internal.selector.MinimumOperationCountServerSelector; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.selector.CompositeServerSelector; import com.mongodb.selector.ServerSelector; @@ -78,7 +82,7 @@ import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_DESCRIPTION; import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; import static com.mongodb.internal.logging.LogMessage.Level.INFO; -import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -121,58 +125,46 @@ public ClusterClock getClock() { public ServerTuple selectServer(final ServerSelector serverSelector, final OperationContext operationContext) { isTrue("open", !isClosed()); - try { - CountDownLatch currentPhase = phase.get(); - ClusterDescription curDescription = description; - logServerSelectionStarted(clusterId, operationContext, serverSelector, curDescription); - ServerDeprioritization serverDeprioritization = operationContext.getServerDeprioritization(); - ServerTuple serverTuple = createCompleteSelectorAndSelectServer(serverSelector, curDescription, serverDeprioritization); - - boolean selectionWaitingLogged = false; - - long startTimeNanos = System.nanoTime(); - long curTimeNanos = startTimeNanos; - Long maxWaitTimeNanos = getMaxWaitTimeNanos(); - - while (true) { - if (!curDescription.isCompatibleWithDriver()) { - throw createAndLogIncompatibleException(operationContext, 
serverSelector, curDescription); - } - - if (serverTuple != null) { - ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); - logServerSelectionSucceeded( - clusterId, operationContext, serverAddress, serverSelector, curDescription); - serverDeprioritization.updateCandidate(serverAddress); - return serverTuple; - } - - Long remainingTimeNanos = maxWaitTimeNanos == null ? null : maxWaitTimeNanos - (curTimeNanos - startTimeNanos); - - if (remainingTimeNanos != null && remainingTimeNanos <= 0) { - throw createAndLogTimeoutException(operationContext, serverSelector, curDescription); - } - - if (!selectionWaitingLogged) { - logServerSelectionWaiting(clusterId, operationContext, remainingTimeNanos, serverSelector, curDescription); - selectionWaitingLogged = true; - } - - connect(); - - currentPhase.await( - remainingTimeNanos == null ? getMinWaitTimeNanos() : Math.min(remainingTimeNanos, getMinWaitTimeNanos()), - NANOSECONDS); - - curTimeNanos = System.nanoTime(); + ServerDeprioritization serverDeprioritization = operationContext.getServerDeprioritization(); + boolean selectionWaitingLogged = false; + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + logServerSelectionStarted(clusterId, operationContext.getId(), serverSelector, description); + while (true) { + CountDownLatch currentPhaseLatch = phase.get(); + ClusterDescription currentDescription = description; + ServerTuple serverTuple = createCompleteSelectorAndSelectServer( + serverSelector, currentDescription, serverDeprioritization, + computedServerSelectionTimeout, operationContext.getTimeoutContext()); + + if (!currentDescription.isCompatibleWithDriver()) { + logAndThrowIncompatibleException(operationContext.getId(), serverSelector, currentDescription); + } + if (serverTuple != null) { + ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); + logServerSelectionSucceeded( + clusterId, + operationContext.getId(), + serverAddress, + serverSelector, + currentDescription); + serverDeprioritization.updateCandidate(serverAddress); + return serverTuple; + } + computedServerSelectionTimeout.onExpired(() -> + logAndThrowTimeoutException(operationContext, serverSelector, currentDescription)); - currentPhase = phase.get(); - curDescription = description; - serverTuple = createCompleteSelectorAndSelectServer(serverSelector, curDescription, serverDeprioritization); + if (!selectionWaitingLogged) { + logServerSelectionWaiting(clusterId, operationContext.getId(), computedServerSelectionTimeout, serverSelector, currentDescription); + selectionWaitingLogged = true; } + connect(); + + Timeout heartbeatLimitedTimeout = Timeout.earliest( + computedServerSelectionTimeout, + startMinWaitHeartbeatTimeout()); - } catch (InterruptedException e) { - throw interruptAndCreateMongoInterruptedException(format("Interrupted while waiting for a server that matches %s", serverSelector), e); + heartbeatLimitedTimeout.awaitOn(currentPhaseLatch, + () -> format("waiting for a server that matches %s", serverSelector)); } } @@ -181,11 +173,18 @@ public void selectServerAsync(final ServerSelector serverSelector, final Operati final SingleResultCallback callback) { isTrue("open", !isClosed()); + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + ServerSelectionRequest request = new ServerSelectionRequest( + serverSelector, operationContext, computedServerSelectionTimeout, callback); + CountDownLatch 
currentPhase = phase.get(); ClusterDescription currentDescription = description; - logServerSelectionStarted(clusterId, operationContext, serverSelector, currentDescription); - ServerSelectionRequest request = new ServerSelectionRequest(operationContext, serverSelector, getMaxWaitTimeNanos(), callback); + logServerSelectionStarted( + clusterId, + operationContext.getId(), + serverSelector, + currentDescription); if (!handleServerSelectionRequest(request, currentPhase, currentDescription)) { notifyWaitQueueHandler(request); @@ -257,50 +256,60 @@ private void updatePhase() { withLock(() -> phase.getAndSet(new CountDownLatch(1)).countDown()); } - @Nullable - private Long getMaxWaitTimeNanos() { - if (settings.getServerSelectionTimeout(NANOSECONDS) < 0) { - return null; - } - return settings.getServerSelectionTimeout(NANOSECONDS); + private Timeout startMinWaitHeartbeatTimeout() { + long minHeartbeatFrequency = serverFactory.getSettings().getMinHeartbeatFrequency(NANOSECONDS); + minHeartbeatFrequency = Math.max(0, minHeartbeatFrequency); + return Timeout.expiresIn(minHeartbeatFrequency, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); } - private long getMinWaitTimeNanos() { - return serverFactory.getSettings().getMinHeartbeatFrequency(NANOSECONDS); - } + private boolean handleServerSelectionRequest( + final ServerSelectionRequest request, final CountDownLatch currentPhase, + final ClusterDescription description) { - private boolean handleServerSelectionRequest(final ServerSelectionRequest request, final CountDownLatch currentPhase, - final ClusterDescription description) { try { + OperationContext operationContext = request.getOperationContext(); + long operationId = operationContext.getId(); if (currentPhase != request.phase) { CountDownLatch prevPhase = request.phase; request.phase = currentPhase; if (!description.isCompatibleWithDriver()) { - request.onResult(null, createAndLogIncompatibleException(request.operationContext, request.originalSelector, description)); - return true; + logAndThrowIncompatibleException(operationId, request.originalSelector, description); } + ServerDeprioritization serverDeprioritization = request.operationContext.getServerDeprioritization(); - ServerTuple serverTuple = createCompleteSelectorAndSelectServer(request.originalSelector, description, serverDeprioritization); + ServerTuple serverTuple = createCompleteSelectorAndSelectServer( + request.originalSelector, + description, + serverDeprioritization, + request.getTimeout(), + operationContext.getTimeoutContext()); + if (serverTuple != null) { ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); - logServerSelectionSucceeded(clusterId, request.operationContext, serverAddress, - request.originalSelector, description); + logServerSelectionSucceeded( + clusterId, + operationId, + serverAddress, + request.originalSelector, + description); serverDeprioritization.updateCandidate(serverAddress); request.onResult(serverTuple, null); return true; } if (prevPhase == null) { logServerSelectionWaiting( - clusterId, request.operationContext, request.getRemainingTime(), request.originalSelector, description); + clusterId, + operationId, + request.getTimeout(), + request.originalSelector, + description); } } - if (request.timedOut()) { - request.onResult(null, createAndLogTimeoutException(request.operationContext, request.originalSelector, description)); - return true; - } - + Timeout.onExistsAndExpired(request.getTimeout(), () -> { + logAndThrowTimeoutException(operationContext, request.originalSelector, 
description); + }); return false; } catch (Exception e) { request.onResult(null, e); @@ -312,9 +321,15 @@ private boolean handleServerSelectionRequest(final ServerSelectionRequest reques private ServerTuple createCompleteSelectorAndSelectServer( final ServerSelector serverSelector, final ClusterDescription clusterDescription, - final ServerDeprioritization serverDeprioritization) { + final ServerDeprioritization serverDeprioritization, + final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { return createCompleteSelectorAndSelectServer( - serverSelector, clusterDescription, getServersSnapshot(), serverDeprioritization, settings); + serverSelector, + clusterDescription, + getServersSnapshot(serverSelectionTimeout, timeoutContext), + serverDeprioritization, + settings); } @Nullable @@ -372,13 +387,13 @@ protected ClusterableServer createServer(final ServerAddress serverAddress) { return serverFactory.create(this, serverAddress); } - private MongoIncompatibleDriverException createAndLogIncompatibleException( - final OperationContext operationContext, + private void logAndThrowIncompatibleException( + final long operationId, final ServerSelector serverSelector, final ClusterDescription clusterDescription) { MongoIncompatibleDriverException exception = createIncompatibleException(clusterDescription); - logServerSelectionFailed(clusterId, operationContext, exception, serverSelector, clusterDescription); - return exception; + logServerSelectionFailed(clusterId, operationId, exception, serverSelector, clusterDescription); + throw exception; } private MongoIncompatibleDriverException createIncompatibleException(final ClusterDescription curDescription) { @@ -400,34 +415,36 @@ private MongoIncompatibleDriverException createIncompatibleException(final Clust return new MongoIncompatibleDriverException(message, curDescription); } - private MongoException createAndLogTimeoutException( + private void logAndThrowTimeoutException( final OperationContext operationContext, final ServerSelector serverSelector, final ClusterDescription clusterDescription) { - MongoTimeoutException exception = new MongoTimeoutException(format( + String message = format( "Timed out while waiting for a server that matches %s. Client view of cluster state is %s", - serverSelector, clusterDescription.getShortDescription())); - logServerSelectionFailed(clusterId, operationContext, exception, serverSelector, clusterDescription); - return exception; + serverSelector, clusterDescription.getShortDescription()); + + MongoTimeoutException exception = operationContext.getTimeoutContext().hasTimeoutMS() + ? 
new MongoOperationTimeoutException(message) : new MongoTimeoutException(message); + + logServerSelectionFailed(clusterId, operationContext.getId(), exception, serverSelector, clusterDescription); + throw exception; } private static final class ServerSelectionRequest { - private final OperationContext operationContext; private final ServerSelector originalSelector; - @Nullable - private final Long maxWaitTimeNanos; private final SingleResultCallback callback; - private final long startTimeNanos = System.nanoTime(); + private final OperationContext operationContext; + private final Timeout timeout; private CountDownLatch phase; - ServerSelectionRequest(final OperationContext operationContext, - final ServerSelector serverSelector, - @Nullable - final Long maxWaitTimeNanos, - final SingleResultCallback callback) { - this.operationContext = operationContext; + ServerSelectionRequest( + final ServerSelector serverSelector, + final OperationContext operationContext, + final Timeout timeout, + final SingleResultCallback callback) { this.originalSelector = serverSelector; - this.maxWaitTimeNanos = maxWaitTimeNanos; + this.operationContext = operationContext; + this.timeout = timeout; this.callback = callback; } @@ -439,14 +456,12 @@ void onResult(@Nullable final ServerTuple serverTuple, @Nullable final Throwable } } - boolean timedOut() { - Long remainingTimeNanos = getRemainingTime(); - return remainingTimeNanos != null && remainingTimeNanos <= 0; + Timeout getTimeout() { + return timeout; } - @Nullable - Long getRemainingTime() { - return maxWaitTimeNanos == null ? null : maxWaitTimeNanos - (System.nanoTime() - startTimeNanos); + public OperationContext getOperationContext() { + return operationContext; } } @@ -477,31 +492,37 @@ private void stopWaitQueueHandler() { } private final class WaitQueueHandler implements Runnable { + + WaitQueueHandler() { + } + public void run() { while (!isClosed) { CountDownLatch currentPhase = phase.get(); ClusterDescription curDescription = description; - long waitTimeNanos = Long.MAX_VALUE; + Timeout timeout = Timeout.infinite(); + boolean someWaitersNotSatisfied = false; for (Iterator iter = waitQueue.iterator(); iter.hasNext();) { - ServerSelectionRequest nextRequest = iter.next(); - if (handleServerSelectionRequest(nextRequest, currentPhase, curDescription)) { + ServerSelectionRequest currentRequest = iter.next(); + if (handleServerSelectionRequest(currentRequest, currentPhase, curDescription)) { iter.remove(); } else { - Long remainingTimeNanos = nextRequest.getRemainingTime(); - long minWaitTimeNanos = Math.min(getMinWaitTimeNanos(), waitTimeNanos); - waitTimeNanos = remainingTimeNanos == null ? minWaitTimeNanos : Math.min(remainingTimeNanos, minWaitTimeNanos); + someWaitersNotSatisfied = true; + timeout = Timeout.earliest( + timeout, + currentRequest.getTimeout(), + startMinWaitHeartbeatTimeout()); } } - // if there are any waiters that were not satisfied, connect - if (waitTimeNanos < Long.MAX_VALUE) { + if (someWaitersNotSatisfied) { connect(); } try { - currentPhase.await(waitTimeNanos, NANOSECONDS); - } catch (InterruptedException closed) { + timeout.awaitOn(currentPhase, () -> "ignored"); + } catch (MongoInterruptedException closed) { // The cluster has been closed and the while loop will exit. 
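
The reworked WaitQueueHandler above drops the manual nanosecond bookkeeping in favour of combining Timeout values. A minimal sketch of that deadline-combining pattern, using only the internal Timeout methods that appear in this patch (infinite, earliest, awaitOn) plus a hypothetical trySatisfy helper standing in for handleServerSelectionRequest; illustrative, not a verbatim extract:

    Timeout wakeDeadline = Timeout.infinite();
    boolean someWaitersNotSatisfied = false;
    for (ServerSelectionRequest request : waitQueue) {
        if (!trySatisfy(request)) {                          // hypothetical stand-in for handleServerSelectionRequest
            someWaitersNotSatisfied = true;
            wakeDeadline = Timeout.earliest(                 // earliest of all pending deadlines
                    wakeDeadline,
                    request.getTimeout(),                    // per-request server selection deadline
                    startMinWaitHeartbeatTimeout());         // never re-check faster than minHeartbeatFrequency
        }
    }
    if (someWaitersNotSatisfied) {
        connect();                                           // request immediate topology checks
    }
    wakeDeadline.awaitOn(phase.get(), () -> "waiting for a topology change or the earliest deadline");
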
} } @@ -515,7 +536,7 @@ public void run() { static void logServerSelectionStarted( final ClusterId clusterId, - final OperationContext operationContext, + final long operationId, final ServerSelector serverSelector, final ClusterDescription clusterDescription) { if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { @@ -523,7 +544,7 @@ static void logServerSelectionStarted( SERVER_SELECTION, DEBUG, "Server selection started", clusterId, asList( new Entry(OPERATION, null), - new Entry(OPERATION_ID, operationContext.getId()), + new Entry(OPERATION_ID, operationId), new Entry(SELECTOR, serverSelector.toString()), new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), "Server selection started for operation[ {}] with ID {}. Selector: {}, topology description: {}")); @@ -532,9 +553,8 @@ static void logServerSelectionStarted( private static void logServerSelectionWaiting( final ClusterId clusterId, - final OperationContext operationContext, - @Nullable - final Long remainingTimeNanos, + final long operationId, + final Timeout timeout, final ServerSelector serverSelector, final ClusterDescription clusterDescription) { if (STRUCTURED_LOGGER.isRequired(INFO, clusterId)) { @@ -542,8 +562,11 @@ private static void logServerSelectionWaiting( SERVER_SELECTION, INFO, "Waiting for suitable server to become available", clusterId, asList( new Entry(OPERATION, null), - new Entry(OPERATION_ID, operationContext.getId()), - new Entry(REMAINING_TIME_MS, remainingTimeNanos == null ? null : NANOSECONDS.toMillis(remainingTimeNanos)), + new Entry(OPERATION_ID, operationId), + timeout.call(MILLISECONDS, + () -> new Entry(REMAINING_TIME_MS, "infinite"), + (ms) -> new Entry(REMAINING_TIME_MS, ms), + () -> new Entry(REMAINING_TIME_MS, 0L)), new Entry(SELECTOR, serverSelector.toString()), new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), "Waiting for server to become available for operation[ {}] with ID {}.[ Remaining time: {} ms.]" @@ -553,7 +576,7 @@ private static void logServerSelectionWaiting( private static void logServerSelectionFailed( final ClusterId clusterId, - final OperationContext operationContext, + final long operationId, final MongoException failure, final ServerSelector serverSelector, final ClusterDescription clusterDescription) { @@ -568,7 +591,7 @@ private static void logServerSelectionFailed( SERVER_SELECTION, DEBUG, "Server selection failed", clusterId, asList( new Entry(OPERATION, null), - new Entry(OPERATION_ID, operationContext.getId()), + new Entry(OPERATION_ID, operationId), new Entry(FAILURE, failureDescription), new Entry(SELECTOR, serverSelector.toString()), new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), @@ -578,7 +601,7 @@ private static void logServerSelectionFailed( static void logServerSelectionSucceeded( final ClusterId clusterId, - final OperationContext operationContext, + final long operationId, final ServerAddress serverAddress, final ServerSelector serverSelector, final ClusterDescription clusterDescription) { @@ -587,7 +610,7 @@ static void logServerSelectionSucceeded( SERVER_SELECTION, DEBUG, "Server selection succeeded", clusterId, asList( new Entry(OPERATION, null), - new Entry(OPERATION_ID, operationContext.getId()), + new Entry(OPERATION_ID, operationId), new Entry(SERVER_HOST, serverAddress.getHost()), new Entry(SERVER_PORT, serverAddress instanceof UnixServerAddress ? 
null : serverAddress.getPort()), new Entry(SELECTOR, serverSelector.toString()), diff --git a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java index 358eb90a175..a6d4a026608 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java @@ -19,11 +19,13 @@ import com.mongodb.ServerAddress; import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.connection.ClusterDescription; -import com.mongodb.connection.ClusterSettings; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.selector.ServerSelector; @@ -41,7 +43,7 @@ public interface Cluster extends Closeable { ClusterId getClusterId(); - ServersSnapshot getServersSnapshot(); + ServersSnapshot getServersSnapshot(Timeout serverSelectionTimeout, TimeoutContext timeoutContext); /** * Get the current description of this cluster. diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java index dc0df6ac27e..31737d7b22b 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java @@ -20,7 +20,6 @@ import com.mongodb.MongoServerException; import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; -import com.mongodb.internal.IgnorableRequestContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; @@ -44,27 +43,30 @@ public final class CommandHelper { static final String LEGACY_HELLO_LOWER = LEGACY_HELLO.toLowerCase(Locale.ROOT); static BsonDocument executeCommand(final String database, final BsonDocument command, final ClusterConnectionMode clusterConnectionMode, - @Nullable final ServerApi serverApi, final InternalConnection internalConnection) { - return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection); + @Nullable final ServerApi serverApi, final InternalConnection internalConnection, final OperationContext operationContext) { + return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection, operationContext); } static BsonDocument executeCommandWithoutCheckingForFailure(final String database, final BsonDocument command, - final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, - final InternalConnection internalConnection) { + final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, + final InternalConnection internalConnection, final OperationContext operationContext) { try { - return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection); + return executeCommand(database, command, clusterConnectionMode, serverApi, internalConnection, operationContext); } catch (MongoServerException e) { return new BsonDocument(); } } - static void executeCommandAsync(final String database, final BsonDocument command, final ClusterConnectionMode 
clusterConnectionMode, - @Nullable final ServerApi serverApi, final InternalConnection internalConnection, + static void executeCommandAsync(final String database, + final BsonDocument command, + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, + final InternalConnection internalConnection, + final OperationContext operationContext, final SingleResultCallback callback) { internalConnection.sendAndReceiveAsync( getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi), - new BsonDocumentCodec(), - NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(), (result, t) -> { + new BsonDocumentCodec(), operationContext, (result, t) -> { if (t != null) { callback.onResult(null, t); } else { @@ -88,11 +90,15 @@ static boolean isCommandOk(final BsonDocument response) { } private static BsonDocument sendAndReceive(final String database, final BsonDocument command, - final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, - final InternalConnection internalConnection) { - return assertNotNull(internalConnection.sendAndReceive(getCommandMessage(database, command, internalConnection, - clusterConnectionMode, serverApi), new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext())); + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, + final InternalConnection internalConnection, + final OperationContext operationContext) { + return assertNotNull( + internalConnection.sendAndReceive( + getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi), + new BsonDocumentCodec(), operationContext) + ); } private static CommandMessage getCommandMessage(final String database, final BsonDocument command, @@ -106,6 +112,7 @@ private static CommandMessage getCommandMessage(final String database, final Bso // which means OP_MSG will not be used .maxWireVersion(internalConnection.getDescription().getMaxWireVersion()) .serverType(internalConnection.getDescription().getServerType()) + .cryptd(internalConnection.getInitialServerDescription().isCryptd()) .build(), clusterConnectionMode, serverApi); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java index 24b30d60acb..53d869a6b8f 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java @@ -21,6 +21,7 @@ import com.mongodb.ReadPreference; import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonArray; @@ -142,7 +143,7 @@ MongoNamespace getNamespace() { } @Override - protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final SessionContext sessionContext) { + protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final OperationContext operationContext) { int messageStartPosition = bsonOutput.getPosition() - MESSAGE_PROLOGUE_LENGTH; int commandStartPosition; if (useOpMsg()) { @@ -151,7 +152,7 @@ protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOu bsonOutput.writeByte(0); // payload type commandStartPosition = bsonOutput.getPosition(); - 
addDocument(command, bsonOutput, commandFieldNameValidator, getExtraElements(sessionContext)); + addDocument(command, bsonOutput, commandFieldNameValidator, getExtraElements(operationContext)); if (payload != null) { bsonOutput.writeByte(1); // payload type @@ -214,8 +215,16 @@ private boolean useOpMsg() { return getOpCode().equals(OpCode.OP_MSG); } - private List getExtraElements(final SessionContext sessionContext) { + private List getExtraElements(final OperationContext operationContext) { + SessionContext sessionContext = operationContext.getSessionContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + List extraElements = new ArrayList<>(); + if (!getSettings().isCryptd()) { + timeoutContext.runMaxTimeMS(maxTimeMS -> + extraElements.add(new BsonElement("maxTimeMS", new BsonInt64(maxTimeMS))) + ); + } extraElements.add(new BsonElement("$db", new BsonString(new MongoNamespace(getCollectionName()).getDatabaseName()))); if (sessionContext.getClusterTime() != null) { extraElements.add(new BsonElement("$clusterTime", sessionContext.getClusterTime())); diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java index 7fab16b30a3..2cc78497980 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java @@ -30,5 +30,5 @@ public interface CommandProtocol { void executeAsync(InternalConnection connection, SingleResultCallback callback); - CommandProtocol sessionContext(SessionContext sessionContext); + CommandProtocol withSessionContext(SessionContext sessionContext); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java index 251b4f21d2d..de9e0666d40 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java @@ -18,8 +18,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.session.SessionContext; @@ -42,16 +40,12 @@ class CommandProtocolImpl implements CommandProtocol { private final Decoder commandResultDecoder; private final boolean responseExpected; private final ClusterConnectionMode clusterConnectionMode; - private final RequestContext requestContext; - private SessionContext sessionContext; - private final ServerApi serverApi; private final OperationContext operationContext; CommandProtocolImpl(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, - final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, final RequestContext requestContext, - final OperationContext operationContext) { + final ClusterConnectionMode clusterConnectionMode, final OperationContext operationContext) { notNull("database", database); this.namespace = new MongoNamespace(notNull("database", database), 
MongoNamespace.COMMAND_COLLECTION_NAME); this.command = notNull("command", command); @@ -62,8 +56,6 @@ class CommandProtocolImpl implements CommandProtocol { this.payload = payload; this.payloadFieldNameValidator = payloadFieldNameValidator; this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); - this.serverApi = serverApi; - this.requestContext = notNull("requestContext", requestContext); this.operationContext = operationContext; isTrueArgument("payloadFieldNameValidator cannot be null if there is a payload.", @@ -73,15 +65,14 @@ class CommandProtocolImpl implements CommandProtocol { @Nullable @Override public T execute(final InternalConnection connection) { - return connection.sendAndReceive(getCommandMessage(connection), commandResultDecoder, sessionContext, requestContext, - operationContext); + return connection.sendAndReceive(getCommandMessage(connection), commandResultDecoder, operationContext); } @Override public void executeAsync(final InternalConnection connection, final SingleResultCallback callback) { try { - connection.sendAndReceiveAsync(getCommandMessage(connection), commandResultDecoder, sessionContext, requestContext, - operationContext, (result, t) -> { + connection.sendAndReceiveAsync(getCommandMessage(connection), commandResultDecoder, operationContext, + (result, t) -> { if (t != null) { callback.onResult(null, t); } else { @@ -94,14 +85,15 @@ public void executeAsync(final InternalConnection connection, final SingleResult } @Override - public CommandProtocolImpl sessionContext(final SessionContext sessionContext) { - this.sessionContext = sessionContext; - return this; + public CommandProtocolImpl withSessionContext(final SessionContext sessionContext) { + return new CommandProtocolImpl<>(namespace.getDatabaseName(), command, commandFieldNameValidator, readPreference, + commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, + operationContext.withSessionContext(sessionContext)); } private CommandMessage getCommandMessage(final InternalConnection connection) { return new CommandMessage(namespace, command, commandFieldNameValidator, readPreference, - getMessageSettings(connection.getDescription()), responseExpected, payload, - payloadFieldNameValidator, clusterConnectionMode, serverApi); + getMessageSettings(connection.getDescription(), connection.getInitialServerDescription()), responseExpected, payload, + payloadFieldNameValidator, clusterConnectionMode, operationContext.getServerApi()); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java index 698fe2ece9f..9880ef3fb0b 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java @@ -16,7 +16,6 @@ package com.mongodb.internal.connection; -import com.mongodb.internal.session.SessionContext; import org.bson.ByteBuf; import org.bson.io.BsonOutput; @@ -38,7 +37,7 @@ class CompressedMessage extends RequestMessage { } @Override - protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final SessionContext sessionContext) { + protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final OperationContext operationContext) { bsonOutput.writeInt32(wrappedOpcode.getValue()); bsonOutput.writeInt32(getWrappedMessageSize(wrappedMessageBuffers) - MESSAGE_HEADER_LENGTH); 
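
The getExtraElements change above is where a per-attempt maxTimeMS now lands in the command body. A condensed sketch of that flow, reusing only names from this patch (runMaxTimeMS, isCryptd, BsonElement) with a placeholder databaseName variable; a simplified illustration rather than the method body itself:

    List<BsonElement> extraElements = new ArrayList<>();
    if (!getSettings().isCryptd()) {                          // maxTimeMS is not appended when talking to mongocryptd
        operationContext.getTimeoutContext().runMaxTimeMS(maxTimeMS ->
                extraElements.add(new BsonElement("maxTimeMS", new BsonInt64(maxTimeMS))));
    }
    extraElements.add(new BsonElement("$db", new BsonString(databaseName)));  // placeholder database name
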
bsonOutput.writeByte(compressor.getId()); diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java b/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java index c174e828bde..fe3ac129631 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java +++ b/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java @@ -23,8 +23,7 @@ import com.mongodb.MongoTimeoutException; import com.mongodb.annotations.ThreadSafe; import com.mongodb.internal.VisibleForTesting; -import com.mongodb.internal.time.TimePoint; -import com.mongodb.internal.time.Timeout; +import com.mongodb.internal.time.StartTime; import com.mongodb.lang.Nullable; import java.util.Deque; @@ -147,7 +146,7 @@ public T get() { * Gets an object from the pool. Blocks until an object is available, or the specified {@code timeout} expires, * or the pool is {@linkplain #close() closed}/{@linkplain #pause(Supplier) paused}. * - * @param timeout See {@link Timeout#started(long, TimeUnit, TimePoint)}. + * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}. * @param timeUnit the time unit of the timeout * @return An object from the pool, or null if can't get one in the given waitTime * @throws MongoTimeoutException if the timeout has been exceeded @@ -231,7 +230,7 @@ private T createNewAndReleasePermitIfFailure() { } /** - * @param timeout See {@link Timeout#started(long, TimeUnit, TimePoint)}. + * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}. */ @VisibleForTesting(otherwise = PRIVATE) boolean acquirePermit(final long timeout, final TimeUnit timeUnit) { @@ -388,7 +387,7 @@ boolean acquirePermitImmediateUnfair() { * This method also emulates the eager {@link InterruptedException} behavior of * {@link java.util.concurrent.Semaphore#tryAcquire(long, TimeUnit)}. * - * @param timeout See {@link Timeout#started(long, TimeUnit, TimePoint)}. + * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}. 
*/ boolean acquirePermit(final long timeout, final TimeUnit unit) throws MongoInterruptedException { long remainingNanos = unit.toNanos(timeout); diff --git a/driver-core/src/main/com/mongodb/internal/connection/Connection.java b/driver-core/src/main/com/mongodb/internal/connection/Connection.java index 6200a626897..95094b240c1 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/Connection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/Connection.java @@ -19,7 +19,6 @@ import com.mongodb.ReadPreference; import com.mongodb.annotations.ThreadSafe; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -47,11 +46,11 @@ public interface Connection extends ReferenceCounted { @Nullable T command(String database, BsonDocument command, FieldNameValidator fieldNameValidator, @Nullable ReadPreference readPreference, - Decoder commandResultDecoder, BindingContext context); + Decoder commandResultDecoder, OperationContext operationContext); @Nullable T command(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator, - @Nullable ReadPreference readPreference, Decoder commandResultDecoder, BindingContext context, + @Nullable ReadPreference readPreference, Decoder commandResultDecoder, OperationContext operationContext, boolean responseExpected, @Nullable SplittablePayload payload, @Nullable FieldNameValidator payloadFieldNameValidator); diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java b/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java index 39a50063163..2129d42b941 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java +++ b/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java @@ -18,15 +18,11 @@ import com.mongodb.MongoConnectionPoolClearedException; import com.mongodb.annotations.ThreadSafe; -import com.mongodb.connection.ConnectionPoolSettings; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.time.Timeout; -import com.mongodb.internal.time.TimePoint; -import org.bson.types.ObjectId; import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; import java.io.Closeable; -import java.util.concurrent.TimeUnit; /** * An instance of an implementation must be created in the {@linkplain #invalidate(Throwable) paused} state. @@ -34,19 +30,10 @@ @ThreadSafe interface ConnectionPool extends Closeable { /** - * Is equivalent to {@link #get(OperationContext, long, TimeUnit)} called with {@link ConnectionPoolSettings#getMaxWaitTime(TimeUnit)}. - */ - InternalConnection get(OperationContext operationContext) throws MongoConnectionPoolClearedException; - - /** - * @param operationContext operation context - * @param timeout This is not a timeout for the whole {@link #get(OperationContext, long, TimeUnit)}, - * see {@link ConnectionPoolSettings#getMaxWaitTime(TimeUnit)}. - *
    - * See {@link Timeout#started(long, TimeUnit, TimePoint)}.
    + * @param operationContext the operation context * @throws MongoConnectionPoolClearedException If detects that the pool is {@linkplain #invalidate(Throwable) paused}. */ - InternalConnection get(OperationContext operationContext, long timeout, TimeUnit timeUnit) throws MongoConnectionPoolClearedException; + InternalConnection get(OperationContext operationContext) throws MongoConnectionPoolClearedException; /** * Completes the {@code callback} with a {@link MongoConnectionPoolClearedException} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java index 13e7ec09a16..a9a3525a90a 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java @@ -46,10 +46,11 @@ class DefaultAuthenticator extends Authenticator implements SpeculativeAuthentic } @Override - void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) { + void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { try { setDelegate(connectionDescription); - delegate.authenticate(connection, connectionDescription); + delegate.authenticate(connection, connectionDescription, operationContext); } catch (Exception e) { throw wrapException(e); } @@ -57,9 +58,9 @@ void authenticate(final InternalConnection connection, final ConnectionDescripti @Override void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { setDelegate(connectionDescription); - delegate.authenticateAsync(connection, connectionDescription, callback); + delegate.authenticateAsync(connection, connectionDescription, operationContext, callback); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java index 0375373c23b..5fb6de6f69a 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java @@ -31,6 +31,7 @@ import com.mongodb.event.CommandListener; import com.mongodb.event.ServerListener; import com.mongodb.event.ServerMonitorListener; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; @@ -60,7 +61,10 @@ public final class DefaultClusterFactory { public Cluster createCluster(final ClusterSettings originalClusterSettings, final ServerSettings originalServerSettings, final ConnectionPoolSettings connectionPoolSettings, final InternalConnectionPoolSettings internalConnectionPoolSettings, - final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory, + final TimeoutSettings clusterTimeoutSettings, + final StreamFactory streamFactory, + final TimeoutSettings heartbeatTimeoutSettings, + final StreamFactory heartbeatStreamFactory, @Nullable final MongoCredential credential, final LoggerSettings loggerSettings, @Nullable final CommandListener commandListener, @@ -98,17 +102,22 @@ public Cluster createCluster(final 
ClusterSettings originalClusterSettings, fina } DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = new DefaultDnsSrvRecordMonitorFactory(clusterId, serverSettings, dnsClient); + InternalOperationContextFactory clusterOperationContextFactory = + new InternalOperationContextFactory(clusterTimeoutSettings, serverApi); + InternalOperationContextFactory heartBeatOperationContextFactory = + new InternalOperationContextFactory(heartbeatTimeoutSettings, serverApi); if (clusterSettings.getMode() == ClusterConnectionMode.LOAD_BALANCED) { ClusterableServerFactory serverFactory = new LoadBalancedClusterableServerFactory(serverSettings, connectionPoolSettings, internalConnectionPoolSettings, streamFactory, credential, loggerSettings, commandListener, applicationName, mongoDriverInformation != null ? mongoDriverInformation : MongoDriverInformation.builder().build(), - compressorList, serverApi); + compressorList, serverApi, clusterOperationContextFactory); return new LoadBalancedCluster(clusterId, clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); } else { ClusterableServerFactory serverFactory = new DefaultClusterableServerFactory(serverSettings, connectionPoolSettings, internalConnectionPoolSettings, - streamFactory, heartbeatStreamFactory, credential, loggerSettings, commandListener, applicationName, + clusterOperationContextFactory, streamFactory, heartBeatOperationContextFactory, heartbeatStreamFactory, credential, + loggerSettings, commandListener, applicationName, mongoDriverInformation != null ? mongoDriverInformation : MongoDriverInformation.builder().build(), compressorList, serverApi, FaasEnvironment.getFaasEnvironment() != FaasEnvironment.UNKNOWN); diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java index 7d0f5b62e51..880e1db8521 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java @@ -43,9 +43,11 @@ public class DefaultClusterableServerFactory implements ClusterableServerFactory private final ServerSettings serverSettings; private final ConnectionPoolSettings connectionPoolSettings; private final InternalConnectionPoolSettings internalConnectionPoolSettings; + private final InternalOperationContextFactory clusterOperationContextFactory; private final StreamFactory streamFactory; - private final MongoCredentialWithCache credential; + private final InternalOperationContextFactory heartbeatOperationContextFactory; private final StreamFactory heartbeatStreamFactory; + private final MongoCredentialWithCache credential; private final LoggerSettings loggerSettings; private final CommandListener commandListener; private final String applicationName; @@ -58,18 +60,20 @@ public class DefaultClusterableServerFactory implements ClusterableServerFactory public DefaultClusterableServerFactory( final ServerSettings serverSettings, final ConnectionPoolSettings connectionPoolSettings, final InternalConnectionPoolSettings internalConnectionPoolSettings, - final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory, - @Nullable final MongoCredential credential, - final LoggerSettings loggerSettings, - @Nullable final CommandListener commandListener, - @Nullable final String applicationName, @Nullable final MongoDriverInformation mongoDriverInformation, + final InternalOperationContextFactory 
clusterOperationContextFactory, final StreamFactory streamFactory, + final InternalOperationContextFactory heartbeatOperationContextFactory, final StreamFactory heartbeatStreamFactory, + @Nullable final MongoCredential credential, final LoggerSettings loggerSettings, + @Nullable final CommandListener commandListener, @Nullable final String applicationName, + @Nullable final MongoDriverInformation mongoDriverInformation, final List compressorList, @Nullable final ServerApi serverApi, final boolean isFunctionAsAServiceEnvironment) { this.serverSettings = serverSettings; this.connectionPoolSettings = connectionPoolSettings; this.internalConnectionPoolSettings = internalConnectionPoolSettings; + this.clusterOperationContextFactory = clusterOperationContextFactory; this.streamFactory = streamFactory; - this.credential = credential == null ? null : new MongoCredentialWithCache(credential); + this.heartbeatOperationContextFactory = heartbeatOperationContextFactory; this.heartbeatStreamFactory = heartbeatStreamFactory; + this.credential = credential == null ? null : new MongoCredentialWithCache(credential); this.loggerSettings = loggerSettings; this.commandListener = commandListener; this.applicationName = applicationName; @@ -88,11 +92,11 @@ public ClusterableServer create(final Cluster cluster, final ServerAddress serve // no credentials, compressor list, or command listener for the server monitor factory new InternalStreamConnectionFactory(clusterMode, true, heartbeatStreamFactory, null, applicationName, mongoDriverInformation, emptyList(), loggerSettings, null, serverApi), - clusterMode, serverApi, isFunctionAsAServiceEnvironment, sdamProvider); + clusterMode, serverApi, isFunctionAsAServiceEnvironment, sdamProvider, heartbeatOperationContextFactory); ConnectionPool connectionPool = new DefaultConnectionPool(serverId, new InternalStreamConnectionFactory(clusterMode, streamFactory, credential, applicationName, mongoDriverInformation, compressorList, loggerSettings, commandListener, serverApi), - connectionPoolSettings, internalConnectionPoolSettings, sdamProvider); + connectionPoolSettings, internalConnectionPoolSettings, sdamProvider, clusterOperationContextFactory); ServerListener serverListener = singleServerListener(serverSettings); SdamServerDescriptionManager sdam = new DefaultSdamServerDescriptionManager(cluster, serverId, serverListener, serverMonitor, connectionPool, clusterMode); diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java index 26676718d41..78db18db2dc 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java @@ -21,7 +21,6 @@ import com.mongodb.MongoInterruptedException; import com.mongodb.MongoServerUnavailableException; import com.mongodb.MongoTimeoutException; -import com.mongodb.RequestContext; import com.mongodb.annotations.NotThreadSafe; import com.mongodb.annotations.ThreadSafe; import com.mongodb.connection.ClusterId; @@ -52,9 +51,8 @@ import com.mongodb.internal.inject.OptionalProvider; import com.mongodb.internal.logging.LogMessage; import com.mongodb.internal.logging.StructuredLogger; -import com.mongodb.internal.session.SessionContext; import com.mongodb.internal.thread.DaemonThreadFactory; -import com.mongodb.internal.time.TimePoint; +import com.mongodb.internal.time.StartTime; import com.mongodb.internal.time.Timeout; 
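
DefaultClusterFactory above now builds two InternalOperationContextFactory instances, one from the cluster TimeoutSettings and one from the heartbeat TimeoutSettings, so that server monitoring carries its own timeout budget. A minimal sketch of that wiring, with constructor shapes taken from this patch and the settings/factory variables treated as placeholders:

    InternalOperationContextFactory clusterOperationContextFactory =
            new InternalOperationContextFactory(clusterTimeoutSettings, serverApi);      // used for pooled connections
    InternalOperationContextFactory heartbeatOperationContextFactory =
            new InternalOperationContextFactory(heartbeatTimeoutSettings, serverApi);    // used by the server monitor
    ConnectionPool connectionPool = new DefaultConnectionPool(serverId, internalConnectionFactory,
            connectionPoolSettings, internalConnectionPoolSettings, sdamProvider, clusterOperationContextFactory);
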
import com.mongodb.lang.NonNull; import com.mongodb.lang.Nullable; @@ -120,18 +118,17 @@ import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVICE_ID; import static com.mongodb.internal.logging.LogMessage.Entry.Name.WAIT_QUEUE_TIMEOUT_MS; import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; -import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static java.lang.String.format; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.NANOSECONDS; -@SuppressWarnings("deprecation") @ThreadSafe final class DefaultConnectionPool implements ConnectionPool { private static final Logger LOGGER = Loggers.getLogger("connection"); private static final StructuredLogger STRUCTURED_LOGGER = new StructuredLogger("connection"); private final ConcurrentPool pool; private final ConnectionPoolSettings settings; + private final InternalOperationContextFactory operationContextFactory; private final BackgroundMaintenanceManager backgroundMaintenance; private final AsyncWorkManager asyncWorkManager; private final ConnectionPoolListener connectionPoolListener; @@ -145,8 +142,10 @@ final class DefaultConnectionPool implements ConnectionPool { @VisibleForTesting(otherwise = PRIVATE) DefaultConnectionPool(final ServerId serverId, final InternalConnectionFactory internalConnectionFactory, - final ConnectionPoolSettings settings, final OptionalProvider sdamProvider) { - this(serverId, internalConnectionFactory, settings, InternalConnectionPoolSettings.builder().build(), sdamProvider); + final ConnectionPoolSettings settings, final OptionalProvider sdamProvider, + final InternalOperationContextFactory operationContextFactory) { + this(serverId, internalConnectionFactory, settings, InternalConnectionPoolSettings.builder().build(), sdamProvider, + operationContextFactory); } /** @@ -160,13 +159,15 @@ final class DefaultConnectionPool implements ConnectionPool { */ DefaultConnectionPool(final ServerId serverId, final InternalConnectionFactory internalConnectionFactory, final ConnectionPoolSettings settings, final InternalConnectionPoolSettings internalSettings, - final OptionalProvider sdamProvider) { + final OptionalProvider sdamProvider, + final InternalOperationContextFactory operationContextFactory) { this.serverId = notNull("serverId", serverId); this.settings = notNull("settings", settings); UsageTrackingInternalConnectionItemFactory connectionItemFactory = new UsageTrackingInternalConnectionItemFactory(internalConnectionFactory); pool = new ConcurrentPool<>(maxSize(settings), connectionItemFactory, format("The server at %s is no longer available", serverId.getAddress())); + this.operationContextFactory = assertNotNull(operationContextFactory); this.sdamProvider = assertNotNull(sdamProvider); this.connectionPoolListener = getConnectionPoolListener(settings); backgroundMaintenance = new BackgroundMaintenanceManager(); @@ -189,18 +190,13 @@ public int getGeneration(@NonNull final ObjectId serviceId) { @Override public InternalConnection get(final OperationContext operationContext) { - return get(operationContext, settings.getMaxWaitTime(MILLISECONDS), MILLISECONDS); - } - - @Override - public InternalConnection get(final OperationContext operationContext, final long timeoutValue, final TimeUnit timeUnit) { - TimePoint checkoutStart = connectionCheckoutStarted(operationContext); - Timeout timeout = Timeout.started(timeoutValue, timeUnit, checkoutStart); + StartTime checkoutStart = 
connectionCheckoutStarted(operationContext); + Timeout waitQueueTimeout = operationContext.getTimeoutContext().startWaitQueueTimeout(checkoutStart); try { stateAndGeneration.throwIfClosedOrPaused(); - PooledConnection connection = getPooledConnection(timeout); + PooledConnection connection = getPooledConnection(waitQueueTimeout, checkoutStart); if (!connection.opened()) { - connection = openConcurrencyLimiter.openOrGetAvailable(connection, timeout); + connection = openConcurrencyLimiter.openOrGetAvailable(operationContext, connection, waitQueueTimeout, checkoutStart); } connection.checkedOutForOperation(operationContext); connectionCheckedOut(operationContext, connection, checkoutStart); @@ -212,12 +208,12 @@ public InternalConnection get(final OperationContext operationContext, final lon @Override public void getAsync(final OperationContext operationContext, final SingleResultCallback callback) { - TimePoint checkoutStart = connectionCheckoutStarted(operationContext); - Timeout timeout = Timeout.started(settings.getMaxWaitTime(NANOSECONDS), checkoutStart); + StartTime checkoutStart = connectionCheckoutStarted(operationContext); + Timeout maxWaitTimeout = checkoutStart.timeoutAfterOrInfiniteIfNegative(settings.getMaxWaitTime(NANOSECONDS), NANOSECONDS); SingleResultCallback eventSendingCallback = (connection, failure) -> { SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); if (failure == null) { - connection.checkedOutForOperation(operationContext); + assertNotNull(connection).checkedOutForOperation(operationContext); connectionCheckedOut(operationContext, connection, checkoutStart); errHandlingCallback.onResult(connection, null); } else { @@ -230,13 +226,13 @@ public void getAsync(final OperationContext operationContext, final SingleResult eventSendingCallback.onResult(null, e); return; } - asyncWorkManager.enqueue(new Task(timeout, t -> { + asyncWorkManager.enqueue(new Task(maxWaitTimeout, checkoutStart, t -> { if (t != null) { eventSendingCallback.onResult(null, t); } else { PooledConnection connection; try { - connection = getPooledConnection(timeout); + connection = getPooledConnection(maxWaitTimeout, checkoutStart); } catch (Exception e) { eventSendingCallback.onResult(null, e); return; @@ -244,7 +240,8 @@ public void getAsync(final OperationContext operationContext, final SingleResult if (connection.opened()) { eventSendingCallback.onResult(connection, null); } else { - openConcurrencyLimiter.openAsyncWithConcurrencyLimit(connection, timeout, eventSendingCallback); + openConcurrencyLimiter.openWithConcurrencyLimitAsync( + operationContext, connection, maxWaitTimeout, checkoutStart, eventSendingCallback); } } })); @@ -255,7 +252,7 @@ public void getAsync(final OperationContext operationContext, final SingleResult * and returns {@code t} if it is not {@link MongoOpenConnectionInternalException}, * or returns {@code t.}{@linkplain MongoOpenConnectionInternalException#getCause() getCause()} otherwise. 
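
The synchronous get(...) path above now derives both its measurement start and its wait-queue deadline from the operation context. A small sketch of that checkout sequence, using names introduced in this patch (StartTime, startWaitQueueTimeout, checkedOutForOperation); openIfNeeded is a hypothetical stand-in for the open-concurrency limiter, and event emission is omitted:

    StartTime checkoutStart = StartTime.now();                         // when the checkout was requested
    Timeout waitQueueTimeout = operationContext.getTimeoutContext().startWaitQueueTimeout(checkoutStart);
    PooledConnection connection = getPooledConnection(waitQueueTimeout, checkoutStart);
    if (!connection.opened()) {
        connection = openIfNeeded(operationContext, connection, waitQueueTimeout, checkoutStart);  // hypothetical
    }
    connection.checkedOutForOperation(operationContext);
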
*/ - private Throwable checkOutFailed(final Throwable t, final OperationContext operationContext, final TimePoint checkoutStart) { + private Throwable checkOutFailed(final Throwable t, final OperationContext operationContext, final StartTime checkoutStart) { Throwable result = t; Reason reason; if (t instanceof MongoTimeoutException) { @@ -334,16 +331,22 @@ public int getGeneration() { return stateAndGeneration.generation(); } - private PooledConnection getPooledConnection(final Timeout timeout) throws MongoTimeoutException { + private PooledConnection getPooledConnection(final Timeout waitQueueTimeout, final StartTime startTime) throws MongoTimeoutException { try { - UsageTrackingInternalConnection internalConnection = pool.get(timeout.remainingOrInfinite(NANOSECONDS), NANOSECONDS); + UsageTrackingInternalConnection internalConnection = waitQueueTimeout.call(NANOSECONDS, + () -> pool.get(-1L, NANOSECONDS), + (ns) -> pool.get(ns, NANOSECONDS), + () -> pool.get(0L, NANOSECONDS)); while (shouldPrune(internalConnection)) { pool.release(internalConnection, true); - internalConnection = pool.get(timeout.remainingOrInfinite(NANOSECONDS), NANOSECONDS); + internalConnection = waitQueueTimeout.call(NANOSECONDS, + () -> pool.get(-1L, NANOSECONDS), + (ns) -> pool.get(ns, NANOSECONDS), + () -> pool.get(0L, NANOSECONDS)); } return new PooledConnection(internalConnection); } catch (MongoTimeoutException e) { - throw createTimeoutException(timeout); + throw createTimeoutException(startTime); } } @@ -357,12 +360,13 @@ private PooledConnection getPooledConnectionImmediateUnfair() { return internalConnection == null ? null : new PooledConnection(internalConnection); } - private MongoTimeoutException createTimeoutException(final Timeout timeout) { + private MongoTimeoutException createTimeoutException(final StartTime startTime) { + long elapsedMs = startTime.elapsed().toMillis(); int numPinnedToCursor = pinnedStatsManager.getNumPinnedToCursor(); int numPinnedToTransaction = pinnedStatsManager.getNumPinnedToTransaction(); if (numPinnedToCursor == 0 && numPinnedToTransaction == 0) { - return new MongoTimeoutException(format("Timed out after %s while waiting for a connection to server %s.", - timeout.toUserString(), serverId.getAddress())); + return new MongoTimeoutException(format("Timed out after %d ms while waiting for a connection to server %s.", + elapsedMs, serverId.getAddress())); } else { int maxSize = pool.getMaxSize(); int numInUse = pool.getInUseCount(); @@ -391,10 +395,10 @@ private MongoTimeoutException createTimeoutException(final Timeout timeout) { int numOtherInUse = numInUse - numPinnedToCursor - numPinnedToTransaction; assertTrue(numOtherInUse >= 0); assertTrue(numPinnedToCursor + numPinnedToTransaction + numOtherInUse <= maxSize); - return new MongoTimeoutException(format("Timed out after %s while waiting for a connection to server %s. Details: " + return new MongoTimeoutException(format("Timed out after %d ms while waiting for a connection to server %s. 
Details: " + "maxPoolSize: %s, connections in use by cursors: %d, connections in use by transactions: %d, " + "connections in use by other operations: %d", - timeout.toUserString(), serverId.getAddress(), + elapsedMs, serverId.getAddress(), sizeToString(maxSize), numPinnedToCursor, numPinnedToTransaction, numOtherInUse)); } @@ -418,7 +422,8 @@ void doMaintenance() { if (shouldEnsureMinSize()) { pool.ensureMinSize(settings.getMinSize(), newConnection -> { try { - openConcurrencyLimiter.openImmediatelyAndTryHandOverOrRelease(new PooledConnection(newConnection)); + OperationContext operationContext = operationContextFactory.createMaintenanceContext(); + openConcurrencyLimiter.openImmediatelyAndTryHandOverOrRelease(operationContext, new PooledConnection(newConnection)); } catch (MongoException | MongoOpenConnectionInternalException e) { RuntimeException actualException = e instanceof MongoOpenConnectionInternalException ? (RuntimeException) e.getCause() @@ -504,13 +509,14 @@ private void connectionPoolCreated(final ConnectionPoolListener connectionPoolLi * Send both current and deprecated events in order to preserve backwards compatibility. * Must not throw {@link Exception}s. * - * @return A {@link TimePoint} before executing {@link ConnectionPoolListener#connectionCreated(ConnectionCreatedEvent)} + * @return A {@link StartTime} before executing {@link ConnectionPoolListener#connectionCreated(ConnectionCreatedEvent)} * and logging the event. This order is required by + * CMAP * and {@link ConnectionReadyEvent#getElapsedTime(TimeUnit)}. */ - private TimePoint connectionCreated(final ConnectionPoolListener connectionPoolListener, final ConnectionId connectionId) { - TimePoint openStart = TimePoint.now(); + private StartTime connectionCreated(final ConnectionPoolListener connectionPoolListener, final ConnectionId connectionId) { + StartTime openStart = StartTime.now(); logEventMessage("Connection created", "Connection created: address={}:{}, driver-generated ID={}", connectionId.getLocalValue()); @@ -545,7 +551,7 @@ private void connectionClosed(final ConnectionPoolListener connectionPoolListene private void connectionCheckedOut( final OperationContext operationContext, final PooledConnection connection, - final TimePoint checkoutStart) { + final StartTime checkoutStart) { Duration checkoutDuration = checkoutStart.elapsed(); ConnectionId connectionId = getId(connection); ClusterId clusterId = serverId.getClusterId(); @@ -562,18 +568,19 @@ private void connectionCheckedOut( } /** - * @return A {@link TimePoint} before executing + * @return A {@link StartTime} before executing * {@link ConnectionPoolListener#connectionCheckOutStarted(ConnectionCheckOutStartedEvent)} and logging the event. * This order is required by * CMAP * and {@link ConnectionCheckedOutEvent#getElapsedTime(TimeUnit)}, {@link ConnectionCheckOutFailedEvent#getElapsedTime(TimeUnit)}. 
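
getPooledConnection above shows the three-branch Timeout.call idiom this patch uses wherever an "infinite / time remaining / already expired" decision is needed. A compact restatement of that call with the pool.get invocations taken from the hunk above; the branch comments reflect the negative-means-infinite convention documented in this patch:

    UsageTrackingInternalConnection internalConnection = waitQueueTimeout.call(NANOSECONDS,
            () -> pool.get(-1L, NANOSECONDS),   // infinite: a negative wait blocks until a connection is available
            (ns) -> pool.get(ns, NANOSECONDS),  // remaining: block for at most the remaining nanoseconds
            () -> pool.get(0L, NANOSECONDS));   // expired: try once without blocking
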
*/ - private TimePoint connectionCheckoutStarted(final OperationContext operationContext) { - TimePoint checkoutStart = TimePoint.now(); + private StartTime connectionCheckoutStarted(final OperationContext operationContext) { + StartTime checkoutStart = StartTime.now(); logEventMessage("Connection checkout started", "Checkout started for connection to {}:{}"); connectionPoolListener.connectionCheckOutStarted(new ConnectionCheckOutStartedEvent(serverId, operationContext.getId())); return checkoutStart; + } /** @@ -598,7 +605,7 @@ private class PooledConnection implements InternalConnection { private final UsageTrackingInternalConnection wrapped; private final AtomicBoolean isClosed = new AtomicBoolean(); private Connection.PinningMode pinningMode; - private OperationContext operationContext; + private long operationId; PooledConnection(final UsageTrackingInternalConnection wrapped) { this.wrapped = notNull("wrapped", wrapped); @@ -610,19 +617,19 @@ public int getGeneration() { } /** - * Associates this with the operation context and establishes the checked out start time + * Associates this with the operation id and establishes the checked out start time */ public void checkedOutForOperation(final OperationContext operationContext) { - this.operationContext = operationContext; + this.operationId = operationContext.getId(); } @Override - public void open() { + public void open(final OperationContext operationContext) { assertFalse(isClosed.get()); - TimePoint openStart; + StartTime openStart; try { openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId()); - wrapped.open(); + wrapped.open(operationContext); } catch (Exception e) { closeAndHandleOpenFailure(); throw new MongoOpenConnectionInternalException(e); @@ -631,10 +638,10 @@ public void open() { } @Override - public void openAsync(final SingleResultCallback callback) { + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { assertFalse(isClosed.get()); - TimePoint openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId()); - wrapped.openAsync((nullResult, failure) -> { + StartTime openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId()); + wrapped.openAsync(operationContext, (nullResult, failure) -> { if (failure != null) { closeAndHandleOpenFailure(); callback.onResult(null, new MongoOpenConnectionInternalException(failure)); @@ -664,8 +671,7 @@ private void connectionCheckedIn() { logEventMessage("Connection checked in", "Connection checked in: address={}:{}, driver-generated ID={}", connectionId.getLocalValue()); - - connectionPoolListener.connectionCheckedIn(new ConnectionCheckedInEvent(connectionId, operationContext.getId())); + connectionPoolListener.connectionCheckedIn(new ConnectionCheckedInEvent(connectionId, operationId)); } void release() { @@ -701,7 +707,7 @@ private void closeAndHandleOpenFailure() { /** * Must not throw {@link Exception}s. 
*/ - private void handleOpenSuccess(final TimePoint openStart) { + private void handleOpenSuccess(final StartTime openStart) { Duration openDuration = openStart.elapsed(); ConnectionId connectionId = getId(this); ClusterId clusterId = serverId.getClusterId(); @@ -731,34 +737,27 @@ public ByteBuf getBuffer(final int capacity) { } @Override - public void sendMessage(final List byteBuffers, final int lastRequestId) { + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { isTrue("open", !isClosed.get()); - wrapped.sendMessage(byteBuffers, lastRequestId); + wrapped.sendMessage(byteBuffers, lastRequestId, operationContext); } @Override - public T sendAndReceive(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext) { + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { isTrue("open", !isClosed.get()); - return wrapped.sendAndReceive(message, decoder, sessionContext, requestContext, operationContext); + return wrapped.sendAndReceive(message, decoder, operationContext); } @Override - public void send(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext) { + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { isTrue("open", !isClosed.get()); - wrapped.send(message, decoder, sessionContext); + wrapped.send(message, decoder, operationContext); } @Override - public T receive(final Decoder decoder, final SessionContext sessionContext) { + public T receive(final Decoder decoder, final OperationContext operationContext) { isTrue("open", !isClosed.get()); - return wrapped.receive(decoder, sessionContext); - } - - @Override - public T receive(final Decoder decoder, final SessionContext sessionContext, final int additionalTimeout) { - isTrue("open", !isClosed.get()); - return wrapped.receive(decoder, sessionContext, additionalTimeout); + return wrapped.receive(decoder, operationContext); } @Override @@ -768,28 +767,30 @@ public boolean hasMoreToCome() { } @Override - public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext, final SingleResultCallback callback) { + public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, + final OperationContext operationContext, final SingleResultCallback callback) { isTrue("open", !isClosed.get()); - wrapped.sendAndReceiveAsync(message, decoder, sessionContext, requestContext, operationContext, (result, t) -> callback.onResult(result, t)); + wrapped.sendAndReceiveAsync(message, decoder, operationContext, callback); } @Override - public ResponseBuffers receiveMessage(final int responseTo) { + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { isTrue("open", !isClosed.get()); - return wrapped.receiveMessage(responseTo); + return wrapped.receiveMessage(responseTo, operationContext); } @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final SingleResultCallback callback) { + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback callback) { isTrue("open", !isClosed.get()); - 
wrapped.sendMessageAsync(byteBuffers, lastRequestId, (result, t) -> callback.onResult(null, t)); + wrapped.sendMessageAsync(byteBuffers, lastRequestId, operationContext, (result, t) -> callback.onResult(null, t)); } @Override - public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { isTrue("open", !isClosed.get()); - wrapped.receiveMessageAsync(responseTo, (result, t) -> callback.onResult(result, t)); + wrapped.receiveMessageAsync(responseTo, operationContext, callback); } @Override @@ -825,7 +826,7 @@ public ServerDescription getInitialServerDescription() { /** * This internal exception is used to express an exceptional situation encountered when opening a connection. * It exists because it allows consolidating the code that sends events for exceptional situations in a - * {@linkplain #checkOutFailed(Throwable, OperationContext, TimePoint) single place}, it must not be observable by an external code. + * {@linkplain #checkOutFailed(Throwable, OperationContext, StartTime) single place}, it must not be observable by an external code. */ private static final class MongoOpenConnectionInternalException extends RuntimeException { private static final long serialVersionUID = 1; @@ -902,19 +903,29 @@ private final class OpenConcurrencyLimiter { desiredConnectionSlots = new LinkedList<>(); } - PooledConnection openOrGetAvailable(final PooledConnection connection, final Timeout timeout) throws MongoTimeoutException { - PooledConnection result = openWithConcurrencyLimit(connection, OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, timeout); + PooledConnection openOrGetAvailable(final OperationContext operationContext, final PooledConnection connection, + final Timeout waitQueueTimeout, final StartTime startTime) + throws MongoTimeoutException { + PooledConnection result = openWithConcurrencyLimit( + operationContext, connection, OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, + waitQueueTimeout, startTime); return assertNotNull(result); } - void openImmediatelyAndTryHandOverOrRelease(final PooledConnection connection) throws MongoTimeoutException { - assertNull(openWithConcurrencyLimit(connection, OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE, Timeout.immediate())); + void openImmediatelyAndTryHandOverOrRelease(final OperationContext operationContext, + final PooledConnection connection) throws MongoTimeoutException { + StartTime startTime = StartTime.now(); + Timeout timeout = startTime.asTimeout(); + assertNull(openWithConcurrencyLimit( + operationContext, + connection, OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE, + timeout, startTime)); } /** - * This method can be thought of as operating in two phases. - * In the first phase it tries to synchronously acquire a permit to open the {@code connection} - * or get a different {@linkplain PooledConnection#opened() opened} connection if {@code mode} is + * This method can be thought of as operating in two phases. In the first phase it tries to synchronously + * acquire a permit to open the {@code connection} or get a different + * {@linkplain PooledConnection#opened() opened} connection if {@code mode} is * {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE} and one becomes available while waiting for a permit. * The first phase has one of the following outcomes: *
@@ -925,7 +936,7 @@ void openImmediatelyAndTryHandOverOrRelease(final PooledConnection connection) t
 *     This outcome is possible only if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}.
 *     <li>A permit is acquired, {@link #connectionCreated(ConnectionPoolListener, ConnectionId)} is reported
 *     and an attempt to open the specified {@code connection} is made. This is the second phase in which
- *     the {@code connection} is {@linkplain PooledConnection#open() opened synchronously}.
+ *     the {@code connection} is {@linkplain InternalConnection#open(OperationContext) opened synchronously}.
 *     The attempt to open the {@code connection} has one of the following outcomes
 *     combined with releasing the acquired permit:
 *
@@ -939,20 +950,23 @@ void openImmediatelyAndTryHandOverOrRelease(final PooledConnection connection) t
 *
 *
      * - * @param timeout Applies only to the first phase. - * @return An {@linkplain PooledConnection#opened() opened} connection which is - * either the specified {@code connection}, - * or potentially a different one if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}, - * or {@code null} if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_HAND_OVER_OR_RELEASE}. + * @param operationContext the operation context + * @param waitQueueTimeout Applies only to the first phase. + * @return An {@linkplain PooledConnection#opened() opened} connection which is either the specified + * {@code connection}, or potentially a different one if {@code mode} is + * {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}, or {@code null} if {@code mode} is + * {@link OpenWithConcurrencyLimitMode#TRY_HAND_OVER_OR_RELEASE}. * @throws MongoTimeoutException If the first phase timed out. */ @Nullable - private PooledConnection openWithConcurrencyLimit(final PooledConnection connection, final OpenWithConcurrencyLimitMode mode, - final Timeout timeout) throws MongoTimeoutException { + private PooledConnection openWithConcurrencyLimit(final OperationContext operationContext, + final PooledConnection connection, final OpenWithConcurrencyLimitMode mode, + final Timeout waitQueueTimeout, final StartTime startTime) + throws MongoTimeoutException { PooledConnection availableConnection; try {//phase one availableConnection = acquirePermitOrGetAvailableOpenedConnection( - mode == OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, timeout); + mode == OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, waitQueueTimeout, startTime); } catch (Exception e) { connection.closeSilently(); throw e; @@ -962,7 +976,7 @@ private PooledConnection openWithConcurrencyLimit(final PooledConnection connect return availableConnection; } else {//acquired a permit, phase two try { - connection.open(); + connection.open(operationContext); if (mode == OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE) { tryHandOverOrRelease(connection.wrapped); return null; @@ -976,23 +990,25 @@ private PooledConnection openWithConcurrencyLimit(final PooledConnection connect } /** - * This method is similar to {@link #openWithConcurrencyLimit(PooledConnection, OpenWithConcurrencyLimitMode, Timeout)} + * This method is similar to {@link #openWithConcurrencyLimit(OperationContext, PooledConnection, OpenWithConcurrencyLimitMode, Timeout, StartTime)} * with the following differences: *
 * <ul>
 *     <li>It does not have the {@code mode} parameter and acts as if this parameter were
 *     {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}.
 *     <li>While the first phase is still synchronous, the {@code connection} is
- *     {@linkplain PooledConnection#openAsync(SingleResultCallback) opened asynchronously} in the second phase.
+ *     {@linkplain InternalConnection#openAsync(OperationContext, SingleResultCallback) opened asynchronously} in the second phase.
 *     <li>Instead of returning a result or throwing an exception via Java {@code return}/{@code throw} statements,
 *     it calls {@code callback.}{@link SingleResultCallback#onResult(Object, Throwable) onResult(result, failure)}
 *     and passes either a {@link PooledConnection} or an {@link Exception}.
 * </ul>
      */ - void openAsyncWithConcurrencyLimit( - final PooledConnection connection, final Timeout timeout, final SingleResultCallback callback) { + void openWithConcurrencyLimitAsync( + final OperationContext operationContext, final PooledConnection connection, + final Timeout maxWaitTimeout, final StartTime startTime, + final SingleResultCallback callback) { PooledConnection availableConnection; try {//phase one - availableConnection = acquirePermitOrGetAvailableOpenedConnection(true, timeout); + availableConnection = acquirePermitOrGetAvailableOpenedConnection(true, maxWaitTimeout, startTime); } catch (Exception e) { connection.closeSilently(); callback.onResult(null, e); @@ -1002,7 +1018,7 @@ void openAsyncWithConcurrencyLimit( connection.closeSilently(); callback.onResult(availableConnection, null); } else {//acquired a permit, phase two - connection.openAsync((nullResult, failure) -> { + connection.openAsync(operationContext, (nullResult, failure) -> { releasePermit(); if (failure != null) { callback.onResult(null, failure); @@ -1022,7 +1038,8 @@ void openAsyncWithConcurrencyLimit( * set on entry to this method or is interrupted while waiting to get an available opened connection. */ @Nullable - private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boolean tryGetAvailable, final Timeout timeout) + private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boolean tryGetAvailable, + final Timeout waitQueueTimeout, final StartTime startTime) throws MongoTimeoutException, MongoInterruptedException { PooledConnection availableConnection = null; boolean expressedDesireToGetAvailableConnection = false; @@ -1048,15 +1065,16 @@ private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boole expressDesireToGetAvailableConnection(); expressedDesireToGetAvailableConnection = true; } - long remainingNanos = timeout.remainingOrInfinite(NANOSECONDS); while (permits == 0 // the absence of short-circuiting is of importance & !stateAndGeneration.throwIfClosedOrPaused() & (availableConnection = tryGetAvailable ? tryGetAvailableConnection() : null) == null) { - if (Timeout.expired(remainingNanos)) { - throw createTimeoutException(timeout); - } - remainingNanos = awaitNanos(permitAvailableOrHandedOverOrClosedOrPausedCondition, remainingNanos); + + Timeout.onExistsAndExpired(waitQueueTimeout, () -> { + throw createTimeoutException(startTime); + }); + waitQueueTimeout.awaitOn(permitAvailableOrHandedOverOrClosedOrPausedCondition, + () -> "acquiring permit or getting available opened connection"); } if (availableConnection == null) { assertTrue(permits > 0); @@ -1129,28 +1147,10 @@ void tryHandOverOrRelease(final UsageTrackingInternalConnection openConnection) void signalClosedOrPaused() { withUnfairLock(lock, permitAvailableOrHandedOverOrClosedOrPausedCondition::signalAll); } - - /** - * @param timeoutNanos See {@link Timeout#started(long, TimePoint)}. - * @return The remaining duration as per {@link Timeout#remainingOrInfinite(TimeUnit)} if waiting ended early either - * spuriously or because of receiving a signal. 
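Taken together, openWithConcurrencyLimit and acquirePermitOrGetAvailableOpenedConnection implement a permit-bounded open: phase one waits, up to the wait-queue timeout, for a permit (or for an already opened connection handed over by another thread), and phase two opens the connection while holding the permit and releases the permit afterwards. The standalone sketch below shows only the bounded-open core of that idea; it uses a fair java.util.concurrent.Semaphore, omits the TRY_GET_AVAILABLE hand-over (which needs the lock-and-condition machinery shown above), and every name in it is illustrative rather than part of the driver.

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    // Sketch only: bound the number of connections being opened concurrently.
    // Phase one: wait up to the wait-queue timeout for a permit.
    // Phase two: open while holding the permit, releasing it afterwards.
    final class OpenLimiterSketch<C> {
        private final Semaphore permits;

        OpenLimiterSketch(final int maxConcurrentOpens) {
            this.permits = new Semaphore(maxConcurrentOpens, true);
        }

        C open(final C connection, final Opener<C> opener, final long waitQueueTimeoutMs)
                throws InterruptedException, TimeoutException {
            // Phase one: a real pool would also close the not-yet-opened connection on failure.
            if (!permits.tryAcquire(waitQueueTimeoutMs, TimeUnit.MILLISECONDS)) {
                throw new TimeoutException("Timed out waiting for a permit to open a connection");
            }
            try {
                // Phase two: the open itself is not bounded by the wait-queue timeout.
                opener.open(connection);
                return connection;
            } finally {
                permits.release();
            }
        }

        interface Opener<C> {
            void open(C connection) throws InterruptedException;
        }
    }

A plain semaphore cannot wake a waiter when a different connection becomes available, which is why the real limiter keeps an explicit lock plus permitAvailableOrHandedOverOrClosedOrPausedCondition and, after this patch, delegates the timed wait to the Timeout abstraction instead of the removed awaitNanos helper.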
- */ - private long awaitNanos(final Condition condition, final long timeoutNanos) throws MongoInterruptedException { - try { - if (timeoutNanos < 0 || timeoutNanos == Long.MAX_VALUE) { - condition.await(); - return -1; - } else { - return Math.max(0, condition.awaitNanos(timeoutNanos)); - } - } catch (InterruptedException e) { - throw interruptAndCreateMongoInterruptedException(null, e); - } - } } /** - * @see OpenConcurrencyLimiter#openWithConcurrencyLimit(PooledConnection, OpenWithConcurrencyLimitMode, Timeout) + * @see OpenConcurrencyLimiter#openWithConcurrencyLimit(OperationContext, PooledConnection, OpenWithConcurrencyLimitMode, Timeout, StartTime) */ private enum OpenWithConcurrencyLimitMode { TRY_GET_AVAILABLE, @@ -1341,11 +1341,11 @@ private void workerRun() { while (state != State.CLOSED) { try { Task task = tasks.take(); - if (task.timeout().expired()) { - task.failAsTimedOut(); - } else { - task.execute(); - } + + task.timeout().run(NANOSECONDS, + () -> task.execute(), + (ns) -> task.execute(), + () -> task.failAsTimedOut()); } catch (InterruptedException closed) { // fail the rest of the tasks and stop } catch (Exception e) { @@ -1391,11 +1391,13 @@ private enum State { @NotThreadSafe final class Task { private final Timeout timeout; + private final StartTime startTime; private final Consumer action; private boolean completed; - Task(final Timeout timeout, final Consumer action) { + Task(final Timeout timeout, final StartTime startTime, final Consumer action) { this.timeout = timeout; + this.startTime = startTime; this.action = action; } @@ -1408,7 +1410,7 @@ void failAsClosed() { } void failAsTimedOut() { - doComplete(() -> createTimeoutException(timeout)); + doComplete(() -> createTimeoutException(startTime)); } private void doComplete(final Supplier failureSupplier) { diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java index 2b300cdfa50..8f3d0f09fd9 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java @@ -18,7 +18,6 @@ import com.mongodb.MongoException; import com.mongodb.MongoServerUnavailableException; -import com.mongodb.MongoSocketException; import com.mongodb.ReadPreference; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ConnectionDescription; @@ -29,7 +28,6 @@ import com.mongodb.event.ServerOpeningEvent; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; @@ -198,15 +196,16 @@ ServerId serverId() { return serverId; } - private class DefaultServerProtocolExecutor implements ProtocolExecutor { + private class DefaultServerProtocolExecutor extends AbstractProtocolExecutor { @SuppressWarnings("unchecked") @Override public T execute(final CommandProtocol protocol, final InternalConnection connection, final SessionContext sessionContext) { try { - protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)); - return protocol.execute(connection); + return protocol + .withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)) + .execute(connection); } catch (MongoException 
e) { try { sdam.handleExceptionAfterHandshake(SdamIssue.specific(e, sdam.context(connection))); @@ -216,9 +215,9 @@ public T execute(final CommandProtocol protocol, final InternalConnection if (e instanceof MongoWriteConcernWithResponseException) { return (T) ((MongoWriteConcernWithResponseException) e).getResponse(); } else { - if (e instanceof MongoSocketException && sessionContext.hasSession()) { + if (shouldMarkSessionDirty(e, sessionContext)) { sessionContext.markSessionDirty(); - } + } throw e; } } @@ -228,8 +227,8 @@ public T execute(final CommandProtocol protocol, final InternalConnection @Override public void executeAsync(final CommandProtocol protocol, final InternalConnection connection, final SessionContext sessionContext, final SingleResultCallback callback) { - protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)); - protocol.executeAsync(connection, errorHandlingCallback((result, t) -> { + protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)) + .executeAsync(connection, errorHandlingCallback((result, t) -> { if (t != null) { try { sdam.handleExceptionAfterHandshake(SdamIssue.specific(t, sdam.context(connection))); @@ -239,7 +238,7 @@ public void executeAsync(final CommandProtocol protocol, final InternalCo if (t instanceof MongoWriteConcernWithResponseException) { callback.onResult((T) ((MongoWriteConcernWithResponseException) t).getResponse(), null); } else { - if (t instanceof MongoSocketException && sessionContext.hasSession()) { + if (shouldMarkSessionDirty(t, sessionContext)) { sessionContext.markSessionDirty(); } callback.onResult(null, t); @@ -295,16 +294,16 @@ public ConnectionDescription getDescription() { @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context) { - return wrapped.command(database, command, fieldNameValidator, readPreference, commandResultDecoder, context); + final OperationContext operationContext) { + return wrapped.command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext); } @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context, final boolean responseExpected, + final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { - return wrapped.command(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, context, + return wrapped.command(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext, responseExpected, payload, payloadFieldNameValidator); } @@ -356,19 +355,19 @@ public ConnectionDescription getDescription() { @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, - @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, - final SingleResultCallback callback) { + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final SingleResultCallback callback) { 
wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, - context, callback); + operationContext, callback); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, - @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, - final boolean responseExpected, @Nullable final SplittablePayload payload, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, - context, responseExpected, payload, payloadFieldNameValidator, callback); + operationContext, responseExpected, payload, payloadFieldNameValidator, callback); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java index 3b053490464..01d5f587fdc 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java @@ -20,7 +20,6 @@ import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.session.SessionContext; @@ -70,39 +69,38 @@ public ConnectionDescription getDescription() { @Nullable @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, - @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context) { - return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, context, true, null, null); + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext) { + return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, null, null); } @Nullable @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context, final boolean responseExpected, + final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { - return executeProtocol(new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, - commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, - context.getServerApi(), context.getRequestContext(), context.getOperationContext()), - context.getSessionContext()); + return executeProtocol( + new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, + responseExpected, payload, payloadFieldNameValidator, 
clusterConnectionMode, operationContext), + operationContext.getSessionContext()); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, - @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final SingleResultCallback callback) { commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, - context, true, null, null, callback); + operationContext, true, null, null, callback); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, - @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { executeProtocolAsync(new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, - commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, - context.getServerApi(), context.getRequestContext(), context.getOperationContext()), - context.getSessionContext(), callback); + commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, operationContext), + operationContext.getSessionContext(), callback); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java index 55030a6db34..656c9bc7779 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java @@ -29,10 +29,10 @@ import com.mongodb.event.ServerHeartbeatStartedEvent; import com.mongodb.event.ServerHeartbeatSucceededEvent; import com.mongodb.event.ServerMonitorListener; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.inject.Provider; -import com.mongodb.internal.session.SessionContext; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonBoolean; @@ -73,6 +73,7 @@ class DefaultServerMonitor implements ServerMonitor { private final ServerId serverId; private final ServerMonitorListener serverMonitorListener; private final Provider sdamProvider; + private final InternalOperationContextFactory operationContextFactory; private final InternalConnectionFactory internalConnectionFactory; private final ClusterConnectionMode clusterConnectionMode; @Nullable @@ -85,22 +86,24 @@ class DefaultServerMonitor implements ServerMonitor { */ @Nullable private RoundTripTimeMonitor roundTripTimeMonitor; - private final ExponentiallyWeightedMovingAverage averageRoundTripTime = new ExponentiallyWeightedMovingAverage(0.2); + private final RoundTripTimeSampler roundTripTimeSampler = new RoundTripTimeSampler(); private final Lock lock = new ReentrantLock(); private final Condition condition = lock.newCondition(); 
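The monitor now feeds its measurements into a RoundTripTimeSampler instead of the bare ExponentiallyWeightedMovingAverage it replaces, and the heartbeat path below reads back both an average and a minimum when building the ServerDescription. The sketch that follows is one plausible shape for such a sampler, an EWMA (the replaced field used a smoothing factor of 0.2) plus a running minimum; its internals are an assumption for illustration, not the driver's actual class.

    // Illustrative only: EWMA plus running minimum of round-trip-time samples,
    // with a reset used when a server check fails.
    final class RoundTripTimeSamplerSketch {
        private static final double ALPHA = 0.2; // smoothing factor of the replaced EWMA
        private double averageNanos = -1;
        private long minNanos = -1;

        synchronized void addSample(final long rttNanos) {
            averageNanos = averageNanos < 0 ? rttNanos : ALPHA * rttNanos + (1 - ALPHA) * averageNanos;
            minNanos = minNanos < 0 ? rttNanos : Math.min(minNanos, rttNanos);
        }

        synchronized long getAverage() {
            return averageNanos < 0 ? 0 : Math.round(averageNanos);
        }

        synchronized long getMin() {
            return minNanos < 0 ? 0 : minNanos;
        }

        synchronized void reset() {
            averageNanos = -1;
            minNanos = -1;
        }
    }

Tracking the minimum alongside the average matches how the patched DescriptionHelper.createServerDescription now accepts both roundTripTime and minRoundTripTime.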
private volatile boolean isClosed; DefaultServerMonitor(final ServerId serverId, final ServerSettings serverSettings, final InternalConnectionFactory internalConnectionFactory, - final ClusterConnectionMode clusterConnectionMode, - @Nullable final ServerApi serverApi, - final boolean isFunctionAsAServiceEnvironment, - final Provider sdamProvider) { + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, + final boolean isFunctionAsAServiceEnvironment, + final Provider sdamProvider, + final InternalOperationContextFactory operationContextFactory) { this.serverSettings = notNull("serverSettings", serverSettings); this.serverId = notNull("serverId", serverId); this.serverMonitorListener = singleServerMonitorListener(serverSettings); this.internalConnectionFactory = notNull("internalConnectionFactory", internalConnectionFactory); this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); + this.operationContextFactory = assertNotNull(operationContextFactory); this.serverApi = serverApi; this.isFunctionAsAServiceEnvironment = isFunctionAsAServiceEnvironment; this.sdamProvider = sdamProvider; @@ -135,7 +138,7 @@ public void close() { isClosed = true; //noinspection EmptyTryBlock try (ServerMonitor ignoredAutoClosed = monitor; - RoundTripTimeMonitor ignoredAutoClose2 = roundTripTimeMonitor) { + RoundTripTimeMonitor ignoredAutoClose2 = roundTripTimeMonitor) { // we are automatically closing resources here } }); @@ -213,9 +216,9 @@ private ServerDescription lookupServerDescription(final ServerDescription curren if (connection == null || connection.isClosed()) { currentCheckCancelled = false; InternalConnection newConnection = internalConnectionFactory.create(serverId); - newConnection.open(); + newConnection.open(operationContextFactory.create()); connection = newConnection; - averageRoundTripTime.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos()); + roundTripTimeSampler.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos()); return connection.getInitialServerDescription(); } @@ -228,7 +231,7 @@ private ServerDescription lookupServerDescription(final ServerDescription curren long start = System.nanoTime(); try { - SessionContext sessionContext = NoOpSessionContext.INSTANCE; + OperationContext operationContext = operationContextFactory.create(); if (!connection.hasMoreToCome()) { BsonDocument helloDocument = new BsonDocument(getHandshakeCommandName(currentServerDescription), new BsonInt32(1)) .append("helloOk", BsonBoolean.TRUE); @@ -238,26 +241,26 @@ private ServerDescription lookupServerDescription(final ServerDescription curren } connection.send(createCommandMessage(helloDocument, connection, currentServerDescription), new BsonDocumentCodec(), - sessionContext); + operationContext); } BsonDocument helloResult; if (shouldStreamResponses) { - helloResult = connection.receive(new BsonDocumentCodec(), sessionContext, - Math.toIntExact(serverSettings.getHeartbeatFrequency(MILLISECONDS))); + helloResult = connection.receive(new BsonDocumentCodec(), operationContextWithAdditionalTimeout(operationContext)); } else { - helloResult = connection.receive(new BsonDocumentCodec(), sessionContext); + helloResult = connection.receive(new BsonDocumentCodec(), operationContext); } long elapsedTimeNanos = System.nanoTime() - start; if (!shouldStreamResponses) { - averageRoundTripTime.addSample(elapsedTimeNanos); + roundTripTimeSampler.addSample(elapsedTimeNanos); } 
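In the streaming branch just above, the hello reply is read with operationContextWithAdditionalTimeout(operationContext), which replaces the removed receive(decoder, sessionContext, additionalTimeout) overload: the extra time now travels inside the context's timeout state instead of as a per-call argument (the helper itself is defined a few lines below). The tiny sketch here only illustrates that copy-with shape; the class and method names are invented for the example.

    import java.util.concurrent.TimeUnit;

    // Illustrative immutable holder: extending the read timeout yields a copy,
    // leaving the original context untouched for subsequent checks.
    final class ReadTimeoutSketch {
        private final long readTimeoutMs;

        ReadTimeoutSketch(final long readTimeoutMs) {
            this.readTimeoutMs = readTimeoutMs;
        }

        ReadTimeoutSketch withAdditionalReadTimeout(final long additionalMs) {
            return new ReadTimeoutSketch(readTimeoutMs + additionalMs);
        }

        long readTimeout(final TimeUnit unit) {
            return unit.convert(readTimeoutMs, TimeUnit.MILLISECONDS);
        }
    }

For an awaitable hello the monitor adds serverSettings.getHeartbeatFrequency(MILLISECONDS) on top of the base read timeout, mirroring the additionalTimeout value it used to pass directly to receive.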
serverMonitorListener.serverHeartbeatSucceeded( new ServerHeartbeatSucceededEvent(connection.getDescription().getConnectionId(), helloResult, elapsedTimeNanos, shouldStreamResponses)); - return createServerDescription(serverId.getAddress(), helloResult, averageRoundTripTime.getAverage()); + return createServerDescription(serverId.getAddress(), helloResult, roundTripTimeSampler.getAverage(), + roundTripTimeSampler.getMin()); } catch (Exception e) { serverMonitorListener.serverHeartbeatFailed( new ServerHeartbeatFailedEvent(connection.getDescription().getConnectionId(), System.nanoTime() - start, @@ -265,7 +268,7 @@ private ServerDescription lookupServerDescription(final ServerDescription curren throw e; } } catch (Throwable t) { - averageRoundTripTime.reset(); + roundTripTimeSampler.reset(); InternalConnection localConnection = withLock(lock, () -> { InternalConnection result = connection; connection = null; @@ -278,6 +281,12 @@ private ServerDescription lookupServerDescription(final ServerDescription curren } } + private OperationContext operationContextWithAdditionalTimeout(final OperationContext originalOperationContext) { + TimeoutContext newTimeoutContext = originalOperationContext.getTimeoutContext() + .withAdditionalReadTimeout(Math.toIntExact(serverSettings.getHeartbeatFrequency(MILLISECONDS))); + return originalOperationContext.withTimeoutContext(newTimeoutContext); + } + private boolean shouldStreamResponses(final ServerDescription currentServerDescription) { boolean serverSupportsStreaming = currentServerDescription.getTopologyVersion() != null; switch (serverSettings.getServerMonitoringMode()) { @@ -297,7 +306,7 @@ private boolean shouldStreamResponses(final ServerDescription currentServerDescr } private CommandMessage createCommandMessage(final BsonDocument command, final InternalConnection connection, - final ServerDescription currentServerDescription) { + final ServerDescription currentServerDescription) { return new CommandMessage(new MongoNamespace("admin", COMMAND_COLLECTION_NAME), command, new NoOpFieldNameValidator(), primary(), MessageSettings.builder() @@ -307,7 +316,7 @@ private CommandMessage createCommandMessage(final BsonDocument command, final In } private void logStateChange(final ServerDescription previousServerDescription, - final ServerDescription currentServerDescription) { + final ServerDescription currentServerDescription) { if (shouldLogStageChange(previousServerDescription, currentServerDescription)) { if (currentServerDescription.getException() != null) { LOGGER.info(format("Exception in monitor thread while connecting to server %s", serverId.getAddress()), @@ -395,12 +404,12 @@ static boolean shouldLogStageChange(final ServerDescription previous, final Serv } ObjectId previousElectionId = previous.getElectionId(); if (previousElectionId != null - ? !previousElectionId.equals(current.getElectionId()) : current.getElectionId() != null) { + ? !previousElectionId.equals(current.getElectionId()) : current.getElectionId() != null) { return true; } Integer setVersion = previous.getSetVersion(); if (setVersion != null - ? !setVersion.equals(current.getSetVersion()) : current.getSetVersion() != null) { + ? 
!setVersion.equals(current.getSetVersion()) : current.getSetVersion() != null) { return true; } @@ -470,17 +479,18 @@ public void run() { private void initialize() { connection = null; connection = internalConnectionFactory.create(serverId); - connection.open(); - averageRoundTripTime.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos()); + connection.open(operationContextFactory.create()); + roundTripTimeSampler.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos()); } private void pingServer(final InternalConnection connection) { long start = System.nanoTime(); + OperationContext operationContext = operationContextFactory.create(); executeCommand("admin", new BsonDocument(getHandshakeCommandName(connection.getInitialServerDescription()), new BsonInt32(1)), - clusterConnectionMode, serverApi, connection); + clusterConnectionMode, serverApi, connection, operationContext); long elapsedTimeNanos = System.nanoTime() - start; - averageRoundTripTime.addSample(elapsedTimeNanos); + roundTripTimeSampler.addSample(elapsedTimeNanos); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java b/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java index e220d88bb31..26f73bcee9c 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java @@ -87,11 +87,12 @@ static ConnectionDescription createConnectionDescription(final ClusterConnection } public static ServerDescription createServerDescription(final ServerAddress serverAddress, final BsonDocument helloResult, - final long roundTripTime) { + final long roundTripTime, final long minRoundTripTime) { return ServerDescription.builder() .state(CONNECTED) .address(serverAddress) .type(getServerType(helloResult)) + .cryptd(helloResult.getBoolean("iscryptd", BsonBoolean.FALSE).getValue()) .canonicalAddress(helloResult.containsKey("me") ? 
helloResult.getString("me").getValue() : null) .hosts(listToSet(helloResult.getArray("hosts", new BsonArray()))) .passives(listToSet(helloResult.getArray("passives", new BsonArray()))) @@ -107,6 +108,7 @@ public static ServerDescription createServerDescription(final ServerAddress serv .topologyVersion(getTopologyVersion(helloResult)) .lastWriteDate(getLastWriteDate(helloResult)) .roundTripTime(roundTripTime, NANOSECONDS) + .minRoundTripTime(minRoundTripTime, NANOSECONDS) .logicalSessionTimeoutMinutes(getLogicalSessionTimeoutMinutes(helloResult)) .helloOk(helloResult.getBoolean("helloOk", BsonBoolean.FALSE).getValue()) .ok(CommandHelper.isCommandOk(helloResult)).build(); diff --git a/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java b/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java index 3831d2bfa35..ed5e55b822a 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java +++ b/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java @@ -171,7 +171,7 @@ void read( void write( ByteBuffer src, long timeout, TimeUnit unit, - A attach, CompletionHandler handler); + @Nullable A attach, CompletionHandler handler); /** * Writes a sequence of bytes to this channel from a subsequence of the given @@ -233,5 +233,5 @@ void write( void write( ByteBuffer[] srcs, int offset, int length, long timeout, TimeUnit unit, - A attach, CompletionHandler handler); + @Nullable A attach, CompletionHandler handler); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java index e2b0188572e..792c33570b7 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java @@ -16,11 +16,9 @@ package com.mongodb.internal.connection; -import com.mongodb.RequestContext; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.ByteBuf; import org.bson.codecs.Decoder; @@ -50,15 +48,18 @@ public interface InternalConnection extends BufferProvider { /** * Opens the connection so its ready for use. Will perform a handshake. + * + * @param operationContext the operation context */ - void open(); + void open(OperationContext operationContext); /** * Opens the connection so its ready for use * - * @param callback the callback to be called once the connection has been opened + * @param operationContext the operation context + * @param callback the callback to be called once the connection has been opened */ - void openAsync(SingleResultCallback callback); + void openAsync(OperationContext operationContext, SingleResultCallback callback); /** * Closes the connection. @@ -90,22 +91,14 @@ public interface InternalConnection extends BufferProvider { * Send a command message to the server. 
* * @param message the command message to send - * @param sessionContext the session context - * @param requestContext the request context * @param operationContext the operation context */ @Nullable - T sendAndReceive(CommandMessage message, Decoder decoder, SessionContext sessionContext, RequestContext requestContext, - OperationContext operationContext); + T sendAndReceive(CommandMessage message, Decoder decoder, OperationContext operationContext); - void send(CommandMessage message, Decoder decoder, SessionContext sessionContext); + void send(CommandMessage message, Decoder decoder, OperationContext operationContext); - T receive(Decoder decoder, SessionContext sessionContext); - - - default T receive(Decoder decoder, SessionContext sessionContext, int additionalTimeout) { - throw new UnsupportedOperationException(); - } + T receive(Decoder decoder, OperationContext operationContext); boolean hasMoreToCome(); @@ -113,45 +106,47 @@ default T receive(Decoder decoder, SessionContext sessionContext, int add * Send a command message to the server. * * @param message the command message to send - * @param sessionContext the session context - * @param operationContext the operation context * @param callback the callback */ - void sendAndReceiveAsync(CommandMessage message, Decoder decoder, SessionContext sessionContext, RequestContext requestContext, - OperationContext operationContext, SingleResultCallback callback); + void sendAndReceiveAsync(CommandMessage message, Decoder decoder, OperationContext operationContext, SingleResultCallback callback); /** * Send a message to the server. The connection may not make any attempt to validate the integrity of the message. * * @param byteBuffers the list of byte buffers to send. * @param lastRequestId the request id of the last message in byteBuffers + * @param operationContext the operation context */ - void sendMessage(List byteBuffers, int lastRequestId); + void sendMessage(List byteBuffers, int lastRequestId, OperationContext operationContext); /** * Receive a response to a sent message from the server. * * @param responseTo the request id that this message is a response to + * @param operationContext the operation context * @return the response */ - ResponseBuffers receiveMessage(int responseTo); + ResponseBuffers receiveMessage(int responseTo, OperationContext operationContext); /** * Asynchronously send a message to the server. The connection may not make any attempt to validate the integrity of the message. * * @param byteBuffers the list of byte buffers to send * @param lastRequestId the request id of the last message in byteBuffers + * @param operationContext the operation context * @param callback the callback to invoke on completion */ - void sendMessageAsync(List byteBuffers, int lastRequestId, SingleResultCallback callback); + void sendMessageAsync(List byteBuffers, int lastRequestId, OperationContext operationContext, + SingleResultCallback callback); /** * Asynchronously receive a response to a sent message from the server. 
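Throughout this interface (and the initializer just below) the separate SessionContext and RequestContext parameters disappear: callers pass a single OperationContext, and implementations that still need session or timeout state pull it from that context, as the earlier hunks do with operationContext.getSessionContext() and operationContext.getTimeoutContext(). Reduced to a pair of illustrative interfaces with Object-typed parameters, the shape of the change is:

    // Illustrative signatures only; real parameter types are elided to Object.
    interface SendAndReceiveBefore {
        <T> T sendAndReceive(Object message, Object decoder,
                             Object sessionContext, Object requestContext, Object operationContext);
    }

    interface SendAndReceiveAfter {
        // One parameter object carries session, request and timeout state, so wrappers that
        // merely forward calls do not change when another piece of per-operation state is added.
        <T> T sendAndReceive(Object message, Object decoder, Object operationContext);
    }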
* * @param responseTo the request id that this message is a response to + * @param operationContext the operation context * @param callback the callback to invoke on completion */ - void receiveMessageAsync(int responseTo, SingleResultCallback callback); + void receiveMessageAsync(int responseTo, OperationContext operationContext, SingleResultCallback callback); default void markAsPinned(Connection.PinningMode pinningMode) { } diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java index 9826f20b69b..077e2c68254 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java @@ -20,14 +20,19 @@ interface InternalConnectionInitializer { - InternalConnectionInitializationDescription startHandshake(InternalConnection internalConnection); + InternalConnectionInitializationDescription startHandshake(InternalConnection internalConnection, + OperationContext operationContext); InternalConnectionInitializationDescription finishHandshake(InternalConnection internalConnection, - InternalConnectionInitializationDescription description); + InternalConnectionInitializationDescription description, + OperationContext operationContext); void startHandshakeAsync(InternalConnection internalConnection, + OperationContext operationContext, SingleResultCallback callback); - void finishHandshakeAsync(InternalConnection internalConnection, InternalConnectionInitializationDescription description, + void finishHandshakeAsync(InternalConnection internalConnection, + InternalConnectionInitializationDescription description, + OperationContext operationContext, SingleResultCallback callback); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java b/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java new file mode 100644 index 00000000000..4653c90050b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.ServerApi; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.lang.Nullable; + +import static com.mongodb.internal.connection.OperationContext.simpleOperationContext; + +public final class InternalOperationContextFactory { + + private final TimeoutSettings timeoutSettings; + @Nullable + private final ServerApi serverApi; + + public InternalOperationContextFactory(final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) { + this.timeoutSettings = timeoutSettings; + this.serverApi = serverApi; + } + + /** + * @return a simple operation context without timeoutMS + */ + OperationContext create() { + return simpleOperationContext(timeoutSettings.connectionOnly(), serverApi); + } + + /** + * @return a simple operation context with timeoutMS if set at the MongoClientSettings level + */ + + OperationContext createMaintenanceContext() { + return create().withTimeoutContext(TimeoutContext.createMaintenanceTimeoutContext(timeoutSettings)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java index fc90ce81bef..8c1b273c52b 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java @@ -23,11 +23,12 @@ import com.mongodb.MongoException; import com.mongodb.MongoInternalException; import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSocketClosedException; import com.mongodb.MongoSocketReadException; import com.mongodb.MongoSocketReadTimeoutException; import com.mongodb.MongoSocketWriteException; -import com.mongodb.RequestContext; +import com.mongodb.MongoSocketWriteTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.annotations.NotThreadSafe; import com.mongodb.connection.AsyncCompletionHandler; @@ -41,6 +42,7 @@ import com.mongodb.connection.ServerType; import com.mongodb.event.CommandListener; import com.mongodb.internal.ResourceUtil; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.AsyncSupplier; import com.mongodb.internal.async.SingleResultCallback; @@ -48,6 +50,7 @@ import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.logging.StructuredLogger; import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonBinaryReader; import org.bson.BsonDocument; @@ -73,6 +76,7 @@ import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.connection.Authenticator.shouldAuthenticate; import static com.mongodb.internal.connection.CommandHelper.HELLO; @@ -219,16 +223,19 @@ public int getGeneration() { } @Override - public void open() { + public void open(final OperationContext originalOperationContext) { isTrue("Open already called", stream == null); stream = streamFactory.create(serverId.getAddress()); try { - 
stream.open(); + OperationContext operationContext = originalOperationContext + .withTimeoutContext(originalOperationContext.getTimeoutContext().withComputedServerSelectionTimeoutContext()); - InternalConnectionInitializationDescription initializationDescription = connectionInitializer.startHandshake(this); + stream.open(operationContext); + + InternalConnectionInitializationDescription initializationDescription = connectionInitializer.startHandshake(this, operationContext); initAfterHandshakeStart(initializationDescription); - initializationDescription = connectionInitializer.finishHandshake(this, initializationDescription); + initializationDescription = connectionInitializer.finishHandshake(this, initializationDescription, operationContext); initAfterHandshakeFinish(initializationDescription); } catch (Throwable t) { close(); @@ -241,14 +248,18 @@ public void open() { } @Override - public void openAsync(final SingleResultCallback callback) { + public void openAsync(final OperationContext originalOperationContext, final SingleResultCallback callback) { assertNull(stream); try { + OperationContext operationContext = originalOperationContext + .withTimeoutContext(originalOperationContext.getTimeoutContext().withComputedServerSelectionTimeoutContext()); + stream = streamFactory.create(serverId.getAddress()); - stream.openAsync(new AsyncCompletionHandler() { + stream.openAsync(operationContext, new AsyncCompletionHandler() { + @Override public void completed(@Nullable final Void aVoid) { - connectionInitializer.startHandshakeAsync(InternalStreamConnection.this, + connectionInitializer.startHandshakeAsync(InternalStreamConnection.this, operationContext, (initialResult, initialException) -> { if (initialException != null) { close(); @@ -257,7 +268,7 @@ public void completed(@Nullable final Void aVoid) { assertNotNull(initialResult); initAfterHandshakeStart(initialResult); connectionInitializer.finishHandshakeAsync(InternalStreamConnection.this, - initialResult, (completedResult, completedException) -> { + initialResult, operationContext, (completedResult, completedException) -> { if (completedException != null) { close(); callback.onResult(null, completedException); @@ -360,46 +371,46 @@ public boolean isClosed() { @Nullable @Override - public T sendAndReceive(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext) { - + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { Supplier sendAndReceiveInternal = () -> sendAndReceiveInternal( - message, decoder, sessionContext, requestContext, operationContext); + message, decoder, operationContext); try { return sendAndReceiveInternal.get(); } catch (MongoCommandException e) { if (reauthenticationIsTriggered(e)) { - return reauthenticateAndRetry(sendAndReceiveInternal); + return reauthenticateAndRetry(sendAndReceiveInternal, operationContext); } throw e; } } @Override - public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext, final SingleResultCallback callback) { + public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, + final OperationContext operationContext, + final SingleResultCallback callback) { AsyncSupplier sendAndReceiveAsyncInternal = c -> sendAndReceiveAsyncInternal( - message, decoder, 
sessionContext, requestContext, operationContext, c); + message, decoder, operationContext, c); beginAsync().thenSupply(c -> { sendAndReceiveAsyncInternal.getAsync(c); }).onErrorIf(e -> reauthenticationIsTriggered(e), (t, c) -> { - reauthenticateAndRetryAsync(sendAndReceiveAsyncInternal, c); + reauthenticateAndRetryAsync(sendAndReceiveAsyncInternal, operationContext, c); }).finish(callback); } - private T reauthenticateAndRetry(final Supplier operation) { + private T reauthenticateAndRetry(final Supplier operation, final OperationContext operationContext) { authenticated.set(false); - assertNotNull(authenticator).reauthenticate(this); + assertNotNull(authenticator).reauthenticate(this, operationContext); authenticated.set(true); return operation.get(); } private void reauthenticateAndRetryAsync(final AsyncSupplier operation, + final OperationContext operationContext, final SingleResultCallback callback) { beginAsync().thenRun(c -> { authenticated.set(false); - assertNotNull(authenticator).reauthenticateAsync(this, c); + assertNotNull(authenticator).reauthenticateAsync(this, operationContext, c); }).thenSupply((c) -> { authenticated.set(true); operation.getAsync(c); @@ -419,15 +430,14 @@ public boolean reauthenticationIsTriggered(@Nullable final Throwable t) { @Nullable private T sendAndReceiveInternal(final CommandMessage message, final Decoder decoder, - final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext) { CommandEventSender commandEventSender; try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) { - message.encode(bsonOutput, sessionContext); - commandEventSender = createCommandEventSender(message, bsonOutput, requestContext, operationContext); + message.encode(bsonOutput, operationContext); + commandEventSender = createCommandEventSender(message, bsonOutput, operationContext); commandEventSender.sendStartedEvent(); try { - sendCommandMessage(message, bsonOutput, sessionContext); + sendCommandMessage(message, bsonOutput, operationContext); } catch (Exception e) { commandEventSender.sendFailedEvent(e); throw e; @@ -435,7 +445,7 @@ private T sendAndReceiveInternal(final CommandMessage message, final Decoder } if (message.isResponseExpected()) { - return receiveCommandMessageResponse(decoder, commandEventSender, sessionContext, 0); + return receiveCommandMessageResponse(decoder, commandEventSender, operationContext); } else { commandEventSender.sendSucceededEventForOneWayCommand(); return null; @@ -443,10 +453,10 @@ private T sendAndReceiveInternal(final CommandMessage message, final Decoder } @Override - public void send(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext) { + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) { - message.encode(bsonOutput, sessionContext); - sendCommandMessage(message, bsonOutput, sessionContext); + message.encode(bsonOutput, operationContext); + sendCommandMessage(message, bsonOutput, operationContext); if (message.isResponseExpected()) { hasMoreToCome = true; } @@ -454,15 +464,9 @@ public void send(final CommandMessage message, final Decoder decoder, fin } @Override - public T receive(final Decoder decoder, final SessionContext sessionContext) { + public T receive(final Decoder decoder, final OperationContext operationContext) { isTrue("Response is expected", hasMoreToCome); - return 
receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), sessionContext, 0); - } - - @Override - public T receive(final Decoder decoder, final SessionContext sessionContext, final int additionalTimeout) { - isTrue("Response is expected", hasMoreToCome); - return receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), sessionContext, additionalTimeout); + return receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), operationContext); } @Override @@ -470,56 +474,57 @@ public boolean hasMoreToCome() { return hasMoreToCome; } - private void sendCommandMessage(final CommandMessage message, - final ByteBufferBsonOutput bsonOutput, final SessionContext sessionContext) { + private void sendCommandMessage(final CommandMessage message, final ByteBufferBsonOutput bsonOutput, + final OperationContext operationContext) { Compressor localSendCompressor = sendCompressor; if (localSendCompressor == null || SECURITY_SENSITIVE_COMMANDS.contains(message.getCommandDocument(bsonOutput).getFirstKey())) { - List byteBuffers = bsonOutput.getByteBuffers(); - try { - sendMessage(byteBuffers, message.getId()); - } finally { - ResourceUtil.release(byteBuffers); - bsonOutput.close(); - } + trySendMessage(message, bsonOutput, operationContext); } else { ByteBufferBsonOutput compressedBsonOutput; List byteBuffers = bsonOutput.getByteBuffers(); try { CompressedMessage compressedMessage = new CompressedMessage(message.getOpCode(), byteBuffers, localSendCompressor, - getMessageSettings(description)); + getMessageSettings(description, initialServerDescription)); compressedBsonOutput = new ByteBufferBsonOutput(this); - compressedMessage.encode(compressedBsonOutput, sessionContext); + compressedMessage.encode(compressedBsonOutput, operationContext); } finally { ResourceUtil.release(byteBuffers); bsonOutput.close(); } - List compressedByteBuffers = compressedBsonOutput.getByteBuffers(); - try { - sendMessage(compressedByteBuffers, message.getId()); - } finally { - ResourceUtil.release(compressedByteBuffers); - compressedBsonOutput.close(); - } + trySendMessage(message, compressedBsonOutput, operationContext); } responseTo = message.getId(); } - private T receiveCommandMessageResponse(final Decoder decoder, - final CommandEventSender commandEventSender, final SessionContext sessionContext, - final int additionalTimeout) { + private void trySendMessage(final CommandMessage message, final ByteBufferBsonOutput bsonOutput, + final OperationContext operationContext) { + Timeout.onExistsAndExpired(operationContext.getTimeoutContext().timeoutIncludingRoundTrip(), () -> { + throw TimeoutContext.createMongoRoundTripTimeoutException(); + }); + List byteBuffers = bsonOutput.getByteBuffers(); + try { + sendMessage(byteBuffers, message.getId(), operationContext); + } finally { + ResourceUtil.release(byteBuffers); + bsonOutput.close(); + } + } + + private T receiveCommandMessageResponse(final Decoder decoder, final CommandEventSender commandEventSender, + final OperationContext operationContext) { boolean commandSuccessful = false; - try (ResponseBuffers responseBuffers = receiveMessageWithAdditionalTimeout(additionalTimeout)) { - updateSessionContext(sessionContext, responseBuffers); + try (ResponseBuffers responseBuffers = receiveResponseBuffers(operationContext)) { + updateSessionContext(operationContext.getSessionContext(), responseBuffers); if (!isCommandOk(responseBuffers)) { throw getCommandFailureException(responseBuffers.getResponseDocument(responseTo, - new BsonDocumentCodec()), 
description.getServerAddress()); + new BsonDocumentCodec()), description.getServerAddress(), operationContext.getTimeoutContext()); } commandSuccessful = true; commandEventSender.sendSucceededEvent(responseBuffers); - T commandResult = getCommandResult(decoder, responseBuffers, responseTo); + T commandResult = getCommandResult(decoder, responseBuffers, responseTo, operationContext.getTimeoutContext()); hasMoreToCome = responseBuffers.getReplyHeader().hasMoreToCome(); if (hasMoreToCome) { responseTo = responseBuffers.getReplyHeader().getRequestId(); @@ -536,8 +541,8 @@ private T receiveCommandMessageResponse(final Decoder decoder, } } - private void sendAndReceiveAsyncInternal(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext, final SingleResultCallback callback) { + private void sendAndReceiveAsyncInternal(final CommandMessage message, final Decoder decoder, + final OperationContext operationContext, final SingleResultCallback callback) { if (isClosed()) { callback.onResult(null, new MongoSocketClosedException("Can not read from a closed socket", getServerAddress())); return; @@ -547,24 +552,24 @@ private void sendAndReceiveAsyncInternal(final CommandMessage message, final ByteBufferBsonOutput compressedBsonOutput = new ByteBufferBsonOutput(this); try { - message.encode(bsonOutput, sessionContext); - CommandEventSender commandEventSender = createCommandEventSender(message, bsonOutput, requestContext, operationContext); + message.encode(bsonOutput, operationContext); + CommandEventSender commandEventSender = createCommandEventSender(message, bsonOutput, operationContext); commandEventSender.sendStartedEvent(); Compressor localSendCompressor = sendCompressor; if (localSendCompressor == null || SECURITY_SENSITIVE_COMMANDS.contains(message.getCommandDocument(bsonOutput).getFirstKey())) { - sendCommandMessageAsync(message.getId(), decoder, sessionContext, callback, bsonOutput, commandEventSender, + sendCommandMessageAsync(message.getId(), decoder, operationContext, callback, bsonOutput, commandEventSender, message.isResponseExpected()); } else { List byteBuffers = bsonOutput.getByteBuffers(); try { CompressedMessage compressedMessage = new CompressedMessage(message.getOpCode(), byteBuffers, localSendCompressor, - getMessageSettings(description)); - compressedMessage.encode(compressedBsonOutput, sessionContext); + getMessageSettings(description, initialServerDescription)); + compressedMessage.encode(compressedBsonOutput, operationContext); } finally { ResourceUtil.release(byteBuffers); bsonOutput.close(); } - sendCommandMessageAsync(message.getId(), decoder, sessionContext, callback, compressedBsonOutput, commandEventSender, + sendCommandMessageAsync(message.getId(), decoder, operationContext, callback, compressedBsonOutput, commandEventSender, message.isResponseExpected()); } } catch (Throwable t) { @@ -574,11 +579,21 @@ private void sendAndReceiveAsyncInternal(final CommandMessage message, final } } - private void sendCommandMessageAsync(final int messageId, final Decoder decoder, final SessionContext sessionContext, + private void sendCommandMessageAsync(final int messageId, final Decoder decoder, final OperationContext operationContext, final SingleResultCallback callback, final ByteBufferBsonOutput bsonOutput, final CommandEventSender commandEventSender, final boolean responseExpected) { List byteBuffers = bsonOutput.getByteBuffers(); - sendMessageAsync(byteBuffers, 
messageId, (result, t) -> { + + boolean[] shouldReturn = {false}; + Timeout.onExistsAndExpired(operationContext.getTimeoutContext().timeoutIncludingRoundTrip(), () -> { + callback.onResult(null, createMongoOperationTimeoutExceptionAndClose(commandEventSender)); + shouldReturn[0] = true; + }); + if (shouldReturn[0]) { + return; + } + + sendMessageAsync(byteBuffers, messageId, operationContext, (result, t) -> { ResourceUtil.release(byteBuffers); bsonOutput.close(); if (t != null) { @@ -588,7 +603,7 @@ private void sendCommandMessageAsync(final int messageId, final Decoder d commandEventSender.sendSucceededEventForOneWayCommand(); callback.onResult(null, null); } else { - readAsync(MESSAGE_HEADER_LENGTH, new MessageHeaderCallback((responseBuffers, t1) -> { + readAsync(MESSAGE_HEADER_LENGTH, operationContext, new MessageHeaderCallback(operationContext, (responseBuffers, t1) -> { if (t1 != null) { commandEventSender.sendFailedEvent(t1); callback.onResult(null, t1); @@ -596,20 +611,20 @@ private void sendCommandMessageAsync(final int messageId, final Decoder d } assertNotNull(responseBuffers); try { - updateSessionContext(sessionContext, responseBuffers); + updateSessionContext(operationContext.getSessionContext(), responseBuffers); boolean commandOk = isCommandOk(new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer()))); responseBuffers.reset(); if (!commandOk) { MongoException commandFailureException = getCommandFailureException( responseBuffers.getResponseDocument(messageId, new BsonDocumentCodec()), - description.getServerAddress()); + description.getServerAddress(), operationContext.getTimeoutContext()); commandEventSender.sendFailedEvent(commandFailureException); throw commandFailureException; } commandEventSender.sendSucceededEvent(responseBuffers); - T result1 = getCommandResult(decoder, responseBuffers, messageId); + T result1 = getCommandResult(decoder, responseBuffers, messageId, operationContext.getTimeoutContext()); callback.onResult(result1, null); } catch (Throwable localThrowable) { callback.onResult(null, localThrowable); @@ -621,9 +636,24 @@ private void sendCommandMessageAsync(final int messageId, final Decoder d }); } - private T getCommandResult(final Decoder decoder, final ResponseBuffers responseBuffers, final int messageId) { + private MongoOperationTimeoutException createMongoOperationTimeoutExceptionAndClose(final CommandEventSender commandEventSender) { + MongoOperationTimeoutException e = TimeoutContext.createMongoRoundTripTimeoutException(); + close(); + commandEventSender.sendFailedEvent(e); + return e; + } + + private T getCommandResult(final Decoder decoder, + final ResponseBuffers responseBuffers, + final int messageId, + final TimeoutContext timeoutContext) { T result = new ReplyMessage<>(responseBuffers, decoder, messageId).getDocument(); - MongoException writeConcernBasedError = createSpecialWriteConcernException(responseBuffers, description.getServerAddress()); + MongoException writeConcernBasedError = createSpecialWriteConcernException(responseBuffers, + description.getServerAddress(), + timeoutContext); + if (writeConcernBasedError instanceof MongoOperationTimeoutException) { + throw writeConcernBasedError; + } if (writeConcernBasedError != null) { throw new MongoWriteConcernWithResponseException(writeConcernBasedError, result); } @@ -631,21 +661,24 @@ private T getCommandResult(final Decoder decoder, final ResponseBuffers r } @Override - public void sendMessage(final List byteBuffers, final int lastRequestId) { + public void 
sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { notNull("stream is open", stream); if (isClosed()) { throw new MongoSocketClosedException("Cannot write to a closed stream", getServerAddress()); } try { - stream.write(byteBuffers); + stream.write(byteBuffers, operationContext); } catch (Exception e) { close(); - throwTranslatedWriteException(e); + throwTranslatedWriteException(e, operationContext); } } @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, + public void sendMessageAsync( + final List byteBuffers, + final int lastRequestId, + final OperationContext operationContext, final SingleResultCallback callback) { beginAsync().thenRun((c) -> { notNull("stream is open", stream); @@ -654,34 +687,26 @@ public void sendMessageAsync(final List byteBuffers, final int lastRequ } c.complete(c); }).thenRunTryCatchAsyncBlocks(c -> { - stream.writeAsync(byteBuffers, c.asHandler()); + stream.writeAsync(byteBuffers, operationContext, c.asHandler()); }, Exception.class, (e, c) -> { close(); - throwTranslatedWriteException(e); + throwTranslatedWriteException(e, operationContext); }).finish(errorHandlingCallback(callback, LOGGER)); } @Override - public ResponseBuffers receiveMessage(final int responseTo) { + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { assertNotNull(stream); if (isClosed()) { throw new MongoSocketClosedException("Cannot read from a closed stream", getServerAddress()); } - return receiveMessageWithAdditionalTimeout(0); - } - - private ResponseBuffers receiveMessageWithAdditionalTimeout(final int additionalTimeout) { - try { - return receiveResponseBuffers(additionalTimeout); - } catch (Throwable t) { - close(); - throw translateReadException(t); - } + return receiveResponseBuffers(operationContext); } @Override - public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { assertNotNull(stream); if (isClosed()) { @@ -689,7 +714,7 @@ public void receiveMessageAsync(final int responseTo, final SingleResultCallback return; } - readAsync(MESSAGE_HEADER_LENGTH, new MessageHeaderCallback((result, t) -> { + readAsync(MESSAGE_HEADER_LENGTH, operationContext, new MessageHeaderCallback(operationContext, (result, t) -> { if (t != null) { close(); callback.onResult(null, t); @@ -699,14 +724,14 @@ public void receiveMessageAsync(final int responseTo, final SingleResultCallback })); } - private void readAsync(final int numBytes, final SingleResultCallback callback) { + private void readAsync(final int numBytes, final OperationContext operationContext, final SingleResultCallback callback) { if (isClosed()) { callback.onResult(null, new MongoSocketClosedException("Cannot read from a closed stream", getServerAddress())); return; } try { - stream.readAsync(numBytes, new AsyncCompletionHandler() { + stream.readAsync(numBytes, operationContext, new AsyncCompletionHandler() { @Override public void completed(@Nullable final ByteBuf buffer) { callback.onResult(buffer, null); @@ -715,12 +740,12 @@ public void completed(@Nullable final ByteBuf buffer) { @Override public void failed(final Throwable t) { close(); - callback.onResult(null, translateReadException(t)); + callback.onResult(null, translateReadException(t, operationContext)); } }); } catch (Exception e) { close(); - 
callback.onResult(null, translateReadException(e)); + callback.onResult(null, translateReadException(e, operationContext)); } } @@ -744,25 +769,33 @@ private void updateSessionContext(final SessionContext sessionContext, final Res } } - private void throwTranslatedWriteException(final Throwable e) { - throw translateWriteException(e); - } + private void throwTranslatedWriteException(final Throwable e, final OperationContext operationContext) { + if (e instanceof MongoSocketWriteTimeoutException && operationContext.getTimeoutContext().hasTimeoutMS()) { + throw createMongoTimeoutException(e); + } - private MongoException translateWriteException(final Throwable e) { if (e instanceof MongoException) { - return (MongoException) e; + throw (MongoException) e; } Optional interruptedException = translateInterruptedException(e, "Interrupted while sending message"); if (interruptedException.isPresent()) { - return interruptedException.get(); + throw interruptedException.get(); } else if (e instanceof IOException) { - return new MongoSocketWriteException("Exception sending message", getServerAddress(), e); + throw new MongoSocketWriteException("Exception sending message", getServerAddress(), e); } else { - return new MongoInternalException("Unexpected exception", e); + throw new MongoInternalException("Unexpected exception", e); } } - private MongoException translateReadException(final Throwable e) { + private MongoException translateReadException(final Throwable e, final OperationContext operationContext) { + if (operationContext.getTimeoutContext().hasTimeoutMS()) { + if (e instanceof SocketTimeoutException) { + return createMongoTimeoutException(createReadTimeoutException((SocketTimeoutException) e)); + } else if (e instanceof MongoSocketReadTimeoutException) { + return createMongoTimeoutException((e)); + } + } + if (e instanceof MongoException) { return (MongoException) e; } @@ -770,7 +803,7 @@ private MongoException translateReadException(final Throwable e) { if (interruptedException.isPresent()) { return interruptedException.get(); } else if (e instanceof SocketTimeoutException) { - return new MongoSocketReadTimeoutException("Timeout while receiving message", getServerAddress(), e); + return createReadTimeoutException((SocketTimeoutException) e); } else if (e instanceof IOException) { return new MongoSocketReadException("Exception receiving message", getServerAddress(), e); } else if (e instanceof RuntimeException) { @@ -780,37 +813,47 @@ private MongoException translateReadException(final Throwable e) { } } - private ResponseBuffers receiveResponseBuffers(final int additionalTimeout) throws IOException { - ByteBuf messageHeaderBuffer = stream.read(MESSAGE_HEADER_LENGTH, additionalTimeout); - MessageHeader messageHeader; - try { - messageHeader = new MessageHeader(messageHeaderBuffer, description.getMaxMessageSize()); - } finally { - messageHeaderBuffer.release(); - } + private MongoSocketReadTimeoutException createReadTimeoutException(final SocketTimeoutException e) { + return new MongoSocketReadTimeoutException("Timeout while receiving message", + getServerAddress(), e); + } - ByteBuf messageBuffer = stream.read(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, additionalTimeout); - boolean releaseMessageBuffer = true; + private ResponseBuffers receiveResponseBuffers(final OperationContext operationContext) { try { - if (messageHeader.getOpCode() == OP_COMPRESSED.getValue()) { - CompressedHeader compressedHeader = new CompressedHeader(messageBuffer, messageHeader); + ByteBuf 
messageHeaderBuffer = stream.read(MESSAGE_HEADER_LENGTH, operationContext); + MessageHeader messageHeader; + try { + messageHeader = new MessageHeader(messageHeaderBuffer, description.getMaxMessageSize()); + } finally { + messageHeaderBuffer.release(); + } - Compressor compressor = getCompressor(compressedHeader); + ByteBuf messageBuffer = stream.read(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, operationContext); + boolean releaseMessageBuffer = true; + try { + if (messageHeader.getOpCode() == OP_COMPRESSED.getValue()) { + CompressedHeader compressedHeader = new CompressedHeader(messageBuffer, messageHeader); - ByteBuf buffer = getBuffer(compressedHeader.getUncompressedSize()); - compressor.uncompress(messageBuffer, buffer); + Compressor compressor = getCompressor(compressedHeader); - buffer.flip(); - return new ResponseBuffers(new ReplyHeader(buffer, compressedHeader), buffer); - } else { - ResponseBuffers responseBuffers = new ResponseBuffers(new ReplyHeader(messageBuffer, messageHeader), messageBuffer); - releaseMessageBuffer = false; - return responseBuffers; - } - } finally { - if (releaseMessageBuffer) { - messageBuffer.release(); + ByteBuf buffer = getBuffer(compressedHeader.getUncompressedSize()); + compressor.uncompress(messageBuffer, buffer); + + buffer.flip(); + return new ResponseBuffers(new ReplyHeader(buffer, compressedHeader), buffer); + } else { + ResponseBuffers responseBuffers = new ResponseBuffers(new ReplyHeader(messageBuffer, messageHeader), messageBuffer); + releaseMessageBuffer = false; + return responseBuffers; + } + } finally { + if (releaseMessageBuffer) { + messageBuffer.release(); + } } + } catch (Throwable t) { + close(); + throw translateReadException(t, operationContext); } } @@ -829,9 +872,11 @@ public ByteBuf getBuffer(final int size) { } private class MessageHeaderCallback implements SingleResultCallback { + private final OperationContext operationContext; private final SingleResultCallback callback; - MessageHeaderCallback(final SingleResultCallback callback) { + MessageHeaderCallback(final OperationContext operationContext, final SingleResultCallback callback) { + this.operationContext = operationContext; this.callback = callback; } @@ -844,7 +889,8 @@ public void onResult(@Nullable final ByteBuf result, @Nullable final Throwable t try { assertNotNull(result); MessageHeader messageHeader = new MessageHeader(result, description.getMaxMessageSize()); - readAsync(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, new MessageCallback(messageHeader)); + readAsync(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, operationContext, + new MessageCallback(messageHeader)); } catch (Throwable localThrowable) { callback.onResult(null, localThrowable); } finally { @@ -906,14 +952,14 @@ public void onResult(@Nullable final ByteBuf result, @Nullable final Throwable t private static final StructuredLogger COMMAND_PROTOCOL_LOGGER = new StructuredLogger("protocol.command"); private CommandEventSender createCommandEventSender(final CommandMessage message, final ByteBufferBsonOutput bsonOutput, - final RequestContext requestContext, final OperationContext operationContext) { + final OperationContext operationContext) { boolean listensOrLogs = commandListener != null || COMMAND_PROTOCOL_LOGGER.isRequired(DEBUG, getClusterId()); if (!recordEverything && (isMonitoringConnection || !opened() || !authenticated.get() || !listensOrLogs)) { return new NoOpCommandEventSender(); } return new LoggingCommandEventSender( SECURITY_SENSITIVE_COMMANDS, 
SECURITY_SENSITIVE_HELLO_COMMANDS, description, commandListener,
-                requestContext, operationContext, message, bsonOutput,
+                operationContext, message, bsonOutput,
                 COMMAND_PROTOCOL_LOGGER, loggerSettings);
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java
index d4858f3d973..ee509873e40 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java
@@ -50,6 +50,7 @@
 * This class is not part of the public API and may be removed or changed at any time
      */ public class InternalStreamConnectionInitializer implements InternalConnectionInitializer { + private static final int INITIAL_MIN_RTT = 0; private final ClusterConnectionMode clusterConnectionMode; private final Authenticator authenticator; private final BsonDocument clientMetadataDocument; @@ -71,29 +72,31 @@ public InternalStreamConnectionInitializer(final ClusterConnectionMode clusterCo } @Override - public InternalConnectionInitializationDescription startHandshake(final InternalConnection internalConnection) { + public InternalConnectionInitializationDescription startHandshake(final InternalConnection internalConnection, + final OperationContext operationContext) { notNull("internalConnection", internalConnection); - return initializeConnectionDescription(internalConnection); + return initializeConnectionDescription(internalConnection, operationContext); } public InternalConnectionInitializationDescription finishHandshake(final InternalConnection internalConnection, - final InternalConnectionInitializationDescription description) { + final InternalConnectionInitializationDescription description, + final OperationContext operationContext) { notNull("internalConnection", internalConnection); notNull("description", description); final ConnectionDescription connectionDescription = description.getConnectionDescription(); if (Authenticator.shouldAuthenticate(authenticator, connectionDescription)) { - authenticator.authenticate(internalConnection, connectionDescription); + authenticator.authenticate(internalConnection, connectionDescription, operationContext); } - return completeConnectionDescriptionInitialization(internalConnection, description); + return completeConnectionDescriptionInitialization(internalConnection, description, operationContext); } @Override - public void startHandshakeAsync(final InternalConnection internalConnection, + public void startHandshakeAsync(final InternalConnection internalConnection, final OperationContext operationContext, final SingleResultCallback callback) { long startTime = System.nanoTime(); executeCommandAsync("admin", createHelloCommand(authenticator, internalConnection), clusterConnectionMode, serverApi, - internalConnection, (helloResult, t) -> { + internalConnection, operationContext, (helloResult, t) -> { if (t != null) { callback.onResult(null, t instanceof MongoException ? 
mapHelloException((MongoException) t) : t); } else { @@ -106,32 +109,36 @@ public void startHandshakeAsync(final InternalConnection internalConnection, @Override public void finishHandshakeAsync(final InternalConnection internalConnection, final InternalConnectionInitializationDescription description, + final OperationContext operationContext, final SingleResultCallback callback) { ConnectionDescription connectionDescription = description.getConnectionDescription(); if (!Authenticator.shouldAuthenticate(authenticator, connectionDescription)) { - completeConnectionDescriptionInitializationAsync(internalConnection, description, callback); + completeConnectionDescriptionInitializationAsync(internalConnection, description, operationContext, callback); } else { - authenticator.authenticateAsync(internalConnection, connectionDescription, + authenticator.authenticateAsync(internalConnection, connectionDescription, operationContext, (result1, t1) -> { if (t1 != null) { callback.onResult(null, t1); } else { - completeConnectionDescriptionInitializationAsync(internalConnection, description, callback); + completeConnectionDescriptionInitializationAsync(internalConnection, description, operationContext, callback); } }); } } - private InternalConnectionInitializationDescription initializeConnectionDescription(final InternalConnection internalConnection) { + private InternalConnectionInitializationDescription initializeConnectionDescription(final InternalConnection internalConnection, + final OperationContext operationContext) { BsonDocument helloResult; BsonDocument helloCommandDocument = createHelloCommand(authenticator, internalConnection); long start = System.nanoTime(); try { - helloResult = executeCommand("admin", helloCommandDocument, clusterConnectionMode, serverApi, internalConnection); + helloResult = executeCommand("admin", helloCommandDocument, clusterConnectionMode, serverApi, internalConnection, operationContext); } catch (MongoException e) { throw mapHelloException(e); + } finally { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); } setSpeculativeAuthenticateResponse(helloResult); return createInitializationDescription(helloResult, internalConnection, start); @@ -154,7 +161,7 @@ private InternalConnectionInitializationDescription createInitializationDescript helloResult); ServerDescription serverDescription = createServerDescription(internalConnection.getDescription().getServerAddress(), helloResult, - System.nanoTime() - startTime); + System.nanoTime() - startTime, INITIAL_MIN_RTT); return new InternalConnectionInitializationDescription(connectionDescription, serverDescription); } @@ -191,7 +198,8 @@ private BsonDocument createHelloCommand(final Authenticator authenticator, final private InternalConnectionInitializationDescription completeConnectionDescriptionInitialization( final InternalConnection internalConnection, - final InternalConnectionInitializationDescription description) { + final InternalConnectionInitializationDescription description, + final OperationContext operationContext) { if (description.getConnectionDescription().getConnectionId().getServerValue() != null) { return description; @@ -199,7 +207,7 @@ private InternalConnectionInitializationDescription completeConnectionDescriptio return applyGetLastErrorResult(executeCommandWithoutCheckingForFailure("admin", new BsonDocument("getlasterror", new BsonInt32(1)), clusterConnectionMode, serverApi, - internalConnection), + internalConnection, operationContext), description); } @@ -213,6 +221,7 @@ private 
void setSpeculativeAuthenticateResponse(final BsonDocument helloResult) private void completeConnectionDescriptionInitializationAsync( final InternalConnection internalConnection, final InternalConnectionInitializationDescription description, + final OperationContext operationContext, final SingleResultCallback callback) { if (description.getConnectionDescription().getConnectionId().getServerValue() != null) { @@ -221,7 +230,7 @@ private void completeConnectionDescriptionInitializationAsync( } executeCommandAsync("admin", new BsonDocument("getlasterror", new BsonInt32(1)), clusterConnectionMode, serverApi, - internalConnection, + internalConnection, operationContext, (result, t) -> { if (t != null) { callback.onResult(description, null); diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java index efc6c4bfb47..ba47236cf4f 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java @@ -18,6 +18,8 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.annotations.ThreadSafe; @@ -35,9 +37,11 @@ import com.mongodb.event.ClusterOpeningEvent; import com.mongodb.event.ServerDescriptionChangedEvent; import com.mongodb.internal.Locks; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.selector.ServerSelector; @@ -60,11 +64,9 @@ import static com.mongodb.internal.connection.BaseCluster.logServerSelectionStarted; import static com.mongodb.internal.connection.BaseCluster.logServerSelectionSucceeded; import static com.mongodb.internal.event.EventListenerHelper.singleClusterListener; -import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static java.lang.String.format; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.NANOSECONDS; @ThreadSafe @@ -181,9 +183,11 @@ public ClusterId getClusterId() { } @Override - public ServersSnapshot getServersSnapshot() { + public ServersSnapshot getServersSnapshot( + final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { isTrue("open", !isClosed()); - waitForSrv(); + waitForSrv(serverSelectionTimeout, timeoutContext); ClusterableServer server = assertNotNull(this.server); return serverAddress -> server; } @@ -203,36 +207,32 @@ public ClusterClock getClock() { @Override public ServerTuple selectServer(final ServerSelector serverSelector, final OperationContext operationContext) { isTrue("open", !isClosed()); - waitForSrv(); + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + waitForSrv(computedServerSelectionTimeout, operationContext.getTimeoutContext()); if (srvRecordResolvedToMultipleHosts) { throw createResolvedToMultipleHostsException(); } ClusterDescription curDescription = 
description; - logServerSelectionStarted(clusterId, operationContext, serverSelector, curDescription); + logServerSelectionStarted(clusterId, operationContext.getId(), serverSelector, curDescription); ServerTuple serverTuple = new ServerTuple(assertNotNull(server), curDescription.getServerDescriptions().get(0)); - logServerSelectionSucceeded(clusterId, operationContext, serverTuple.getServerDescription().getAddress(), serverSelector, curDescription); + logServerSelectionSucceeded(clusterId, operationContext.getId(), serverTuple.getServerDescription().getAddress(), + serverSelector, curDescription); return serverTuple; } - - private void waitForSrv() { + private void waitForSrv(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) { if (initializationCompleted) { return; } Locks.withLock(lock, () -> { - long remainingTimeNanos = getMaxWaitTimeNanos(); while (!initializationCompleted) { if (isClosed()) { throw createShutdownException(); } - if (remainingTimeNanos <= 0) { - throw createTimeoutException(); - } - try { - remainingTimeNanos = condition.awaitNanos(remainingTimeNanos); - } catch (InterruptedException e) { - throw interruptAndCreateMongoInterruptedException(format("Interrupted while resolving SRV records for %s", settings.getSrvHost()), e); - } + serverSelectionTimeout.onExpired(() -> { + throw createTimeoutException(timeoutContext); + }); + serverSelectionTimeout.awaitOn(condition, () -> format("resolving SRV records for %s", settings.getSrvHost())); } }); } @@ -244,9 +244,9 @@ public void selectServerAsync(final ServerSelector serverSelector, final Operati callback.onResult(null, createShutdownException()); return; } - - ServerSelectionRequest serverSelectionRequest = new ServerSelectionRequest( - operationContext, serverSelector, getMaxWaitTimeNanos(), callback); + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + ServerSelectionRequest serverSelectionRequest = new ServerSelectionRequest(operationContext.getId(), serverSelector, + operationContext, computedServerSelectionTimeout, callback); if (initializationCompleted) { handleServerSelectionRequest(serverSelectionRequest); } else { @@ -298,9 +298,9 @@ private void handleServerSelectionRequest(final ServerSelectionRequest serverSel } else { ClusterDescription curDescription = description; logServerSelectionStarted( - clusterId, serverSelectionRequest.operationContext, serverSelectionRequest.serverSelector, curDescription); + clusterId, serverSelectionRequest.operationId, serverSelectionRequest.serverSelector, curDescription); ServerTuple serverTuple = new ServerTuple(assertNotNull(server), curDescription.getServerDescriptions().get(0)); - logServerSelectionSucceeded(clusterId, serverSelectionRequest.operationContext, + logServerSelectionSucceeded(clusterId, serverSelectionRequest.operationId, serverTuple.getServerDescription().getAddress(), serverSelectionRequest.serverSelector, curDescription); serverSelectionRequest.onSuccess(serverTuple); } @@ -311,23 +311,20 @@ private MongoClientException createResolvedToMultipleHostsException() { + "to multiple hosts"); } - private MongoTimeoutException createTimeoutException() { + private MongoTimeoutException createTimeoutException(final TimeoutContext timeoutContext) { MongoException localSrvResolutionException = srvResolutionException; + String message; if (localSrvResolutionException == null) { - return new MongoTimeoutException(format("Timed out after %d ms while waiting to resolve SRV records for 
%s.", - settings.getServerSelectionTimeout(MILLISECONDS), settings.getSrvHost())); + message = format("Timed out while waiting to resolve SRV records for %s.", settings.getSrvHost()); } else { - return new MongoTimeoutException(format("Timed out after %d ms while waiting to resolve SRV records for %s. " - + "Resolution exception was '%s'", - settings.getServerSelectionTimeout(MILLISECONDS), settings.getSrvHost(), localSrvResolutionException)); + message = format("Timed out while waiting to resolve SRV records for %s. " + + "Resolution exception was '%s'", settings.getSrvHost(), localSrvResolutionException); } + return createTimeoutException(timeoutContext, message); } - private long getMaxWaitTimeNanos() { - if (settings.getServerSelectionTimeout(NANOSECONDS) < 0) { - return Long.MAX_VALUE; - } - return settings.getServerSelectionTimeout(NANOSECONDS); + private static MongoTimeoutException createTimeoutException(final TimeoutContext timeoutContext, final String message) { + return timeoutContext.hasTimeoutMS() ? new MongoOperationTimeoutException(message) : new MongoTimeoutException(message); } private void notifyWaitQueueHandler(final ServerSelectionRequest request) { @@ -362,32 +359,35 @@ public void run() { if (isClosed() || initializationCompleted) { break; } - long waitTimeNanos = Long.MAX_VALUE; - long curTimeNanos = System.nanoTime(); + Timeout waitTimeNanos = Timeout.infinite(); for (Iterator iterator = waitQueue.iterator(); iterator.hasNext();) { ServerSelectionRequest next = iterator.next(); - long remainingTime = next.getRemainingTime(curTimeNanos); - if (remainingTime <= 0) { - timeoutList.add(next); - iterator.remove(); - } else { - waitTimeNanos = Math.min(remainingTime, waitTimeNanos); - } + + Timeout nextTimeout = next.getTimeout(); + Timeout waitTimeNanosFinal = waitTimeNanos; + waitTimeNanos = nextTimeout.call(NANOSECONDS, + () -> Timeout.earliest(waitTimeNanosFinal, nextTimeout), + (ns) -> Timeout.earliest(waitTimeNanosFinal, nextTimeout), + () -> { + timeoutList.add(next); + iterator.remove(); + return waitTimeNanosFinal; + }); } if (timeoutList.isEmpty()) { try { - //noinspection ResultOfMethodCallIgnored - condition.await(waitTimeNanos, NANOSECONDS); - } catch (InterruptedException unexpected) { + waitTimeNanos.awaitOn(condition, () -> "ignored"); + } catch (MongoInterruptedException unexpected) { fail(); } } } finally { lock.unlock(); } - - timeoutList.forEach(request -> request.onError(createTimeoutException())); + timeoutList.forEach(request -> request.onError(createTimeoutException(request + .getOperationContext() + .getTimeoutContext()))); timeoutList.clear(); } @@ -405,24 +405,27 @@ public void run() { } private static final class ServerSelectionRequest { - private final OperationContext operationContext; + private final long operationId; private final ServerSelector serverSelector; - private final long maxWaitTimeNanos; - private final long startTimeNanos = System.nanoTime(); private final SingleResultCallback callback; + private final Timeout timeout; + private final OperationContext operationContext; - private ServerSelectionRequest( - final OperationContext operationContext, - final ServerSelector serverSelector, - final long maxWaitTimeNanos, final SingleResultCallback callback) { - this.operationContext = operationContext; + private ServerSelectionRequest(final long operationId, final ServerSelector serverSelector, final OperationContext operationContext, + final Timeout timeout, final SingleResultCallback callback) { + this.operationId = operationId; 
this.serverSelector = serverSelector; - this.maxWaitTimeNanos = maxWaitTimeNanos; + this.timeout = timeout; + this.operationContext = operationContext; this.callback = callback; } - long getRemainingTime(final long curTimeNanos) { - return startTimeNanos + maxWaitTimeNanos - curTimeNanos; + Timeout getTimeout() { + return timeout; + } + + OperationContext getOperationContext() { + return operationContext; } public void onSuccess(final ServerTuple serverTuple) { diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java index 0521e094cb1..bcd86fa5205 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java @@ -51,6 +51,7 @@ public class LoadBalancedClusterableServerFactory implements ClusterableServerFa private final MongoDriverInformation mongoDriverInformation; private final List compressorList; private final ServerApi serverApi; + private final InternalOperationContextFactory operationContextFactory; public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings, final ConnectionPoolSettings connectionPoolSettings, @@ -59,7 +60,8 @@ public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings, final LoggerSettings loggerSettings, @Nullable final CommandListener commandListener, @Nullable final String applicationName, final MongoDriverInformation mongoDriverInformation, - final List compressorList, @Nullable final ServerApi serverApi) { + final List compressorList, @Nullable final ServerApi serverApi, + final InternalOperationContextFactory operationContextFactory) { this.serverSettings = serverSettings; this.connectionPoolSettings = connectionPoolSettings; this.internalConnectionPoolSettings = internalConnectionPoolSettings; @@ -71,6 +73,7 @@ public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings, this.mongoDriverInformation = mongoDriverInformation; this.compressorList = compressorList; this.serverApi = serverApi; + this.operationContextFactory = operationContextFactory; } @Override @@ -78,7 +81,7 @@ public ClusterableServer create(final Cluster cluster, final ServerAddress serve ConnectionPool connectionPool = new DefaultConnectionPool(new ServerId(cluster.getClusterId(), serverAddress), new InternalStreamConnectionFactory(ClusterConnectionMode.LOAD_BALANCED, streamFactory, credential, applicationName, mongoDriverInformation, compressorList, loggerSettings, commandListener, serverApi), - connectionPoolSettings, internalConnectionPoolSettings, EmptyProvider.instance()); + connectionPoolSettings, internalConnectionPoolSettings, EmptyProvider.instance(), operationContextFactory); connectionPool.ready(); return new LoadBalancedServer(new ServerId(cluster.getClusterId(), serverAddress), connectionPool, new DefaultConnectionFactory(), diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java index f55bd5c93dc..3820810ab9f 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java @@ -154,13 +154,13 @@ ConnectionPool getConnectionPool() { return connectionPool; } - private class LoadBalancedServerProtocolExecutor 
implements ProtocolExecutor { + private class LoadBalancedServerProtocolExecutor extends AbstractProtocolExecutor { @SuppressWarnings("unchecked") @Override public T execute(final CommandProtocol protocol, final InternalConnection connection, final SessionContext sessionContext) { try { - protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)); - return protocol.execute(connection); + return protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)) + .execute(connection); } catch (MongoWriteConcernWithResponseException e) { return (T) e.getResponse(); } catch (MongoException e) { @@ -173,8 +173,8 @@ public T execute(final CommandProtocol protocol, final InternalConnection @Override public void executeAsync(final CommandProtocol protocol, final InternalConnection connection, final SessionContext sessionContext, final SingleResultCallback callback) { - protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)); - protocol.executeAsync(connection, errorHandlingCallback((result, t) -> { + protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)) + .executeAsync(connection, errorHandlingCallback((result, t) -> { if (t != null) { if (t instanceof MongoWriteConcernWithResponseException) { callback.onResult((T) ((MongoWriteConcernWithResponseException) t).getResponse(), null); @@ -191,7 +191,7 @@ public void executeAsync(final CommandProtocol protocol, final InternalCo private void handleExecutionException(final InternalConnection connection, final SessionContext sessionContext, final Throwable t) { invalidate(t, connection.getDescription().getServiceId(), connection.getGeneration()); - if (t instanceof MongoSocketException && sessionContext.hasSession()) { + if (shouldMarkSessionDirty(t, sessionContext)) { sessionContext.markSessionDirty(); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java b/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java index 6215bc8b98a..3821ca947c6 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java +++ b/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java @@ -18,7 +18,6 @@ import com.mongodb.LoggerSettings; import com.mongodb.MongoCommandException; -import com.mongodb.RequestContext; import com.mongodb.connection.ClusterId; import com.mongodb.connection.ConnectionDescription; import com.mongodb.event.CommandListener; @@ -36,7 +35,6 @@ import org.bson.json.JsonWriterSettings; import java.io.StringWriter; - import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -66,7 +64,6 @@ class LoggingCommandEventSender implements CommandEventSender { private final ConnectionDescription description; @Nullable private final CommandListener commandListener; - private final RequestContext requestContext; private final OperationContext operationContext; private final StructuredLogger logger; private final LoggerSettings loggerSettings; @@ -78,12 +75,14 @@ class LoggingCommandEventSender implements CommandEventSender { LoggingCommandEventSender(final Set securitySensitiveCommands, final Set securitySensitiveHelloCommands, final ConnectionDescription description, - @Nullable final CommandListener commandListener, final RequestContext requestContext, final OperationContext operationContext, - final CommandMessage message, final ByteBufferBsonOutput bsonOutput, 
final StructuredLogger logger, + @Nullable final CommandListener commandListener, + final OperationContext operationContext, + final CommandMessage message, + final ByteBufferBsonOutput bsonOutput, + final StructuredLogger logger, final LoggerSettings loggerSettings) { this.description = description; this.commandListener = commandListener; - this.requestContext = requestContext; this.operationContext = operationContext; this.logger = logger; this.loggerSettings = loggerSettings; @@ -113,7 +112,7 @@ public void sendStartedEvent() { ? new BsonDocument() : commandDocument; sendCommandStartedEvent(message, message.getNamespace().getDatabaseName(), commandName, commandDocumentForEvent, description, - assertNotNull(commandListener), requestContext, operationContext); + assertNotNull(commandListener), operationContext); } // the buffer underlying the command document may be released after the started event, so set to null to ensure it's not used // when sending the failed or succeeded event @@ -142,8 +141,8 @@ public void sendFailedEvent(final Throwable t) { } if (eventRequired()) { - sendCommandFailedEvent(message, message.getNamespace().getDatabaseName(), commandName, description, elapsedTimeNanos, - commandEventException, commandListener, requestContext, operationContext); + sendCommandFailedEvent(message, commandName, message.getNamespace().getDatabaseName(), description, elapsedTimeNanos, + commandEventException, commandListener, operationContext); } } @@ -179,8 +178,8 @@ private void sendSucceededEvent(final BsonDocument reply) { if (eventRequired()) { BsonDocument responseDocumentForEvent = redactionRequired ? new BsonDocument() : reply; - sendCommandSucceededEvent(message, message.getNamespace().getDatabaseName(), commandName, responseDocumentForEvent, description, - elapsedTimeNanos, commandListener, requestContext, operationContext); + sendCommandSucceededEvent(message, commandName, message.getNamespace().getDatabaseName(), responseDocumentForEvent, + description, elapsedTimeNanos, commandListener, operationContext); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java index 3157635febf..7a5734bc140 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java +++ b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java @@ -49,6 +49,7 @@ public final class MessageSettings { private final int maxWireVersion; private final ServerType serverType; private final boolean sessionSupported; + private final boolean cryptd; /** * Gets the builder @@ -70,6 +71,7 @@ public static final class Builder { private int maxWireVersion; private ServerType serverType; private boolean sessionSupported; + private boolean cryptd; /** * Build it. @@ -127,6 +129,17 @@ public Builder sessionSupported(final boolean sessionSupported) { this.sessionSupported = sessionSupported; return this; } + + /** + * Set whether the server is a mongocryptd. + * + * @param cryptd true if the server is a mongocryptd. 
+ * @return this + */ + public Builder cryptd(final boolean cryptd) { + this.cryptd = cryptd; + return this; + } } /** @@ -163,6 +176,9 @@ public int getMaxWireVersion() { public ServerType getServerType() { return serverType; } + public boolean isCryptd() { + return cryptd; + } public boolean isSessionSupported() { return sessionSupported; @@ -176,5 +192,6 @@ private MessageSettings(final Builder builder) { this.maxWireVersion = builder.maxWireVersion; this.serverType = builder.serverType; this.sessionSupported = builder.sessionSupported; + this.cryptd = builder.cryptd; } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java index 164d93aac9c..3d778ae0349 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java @@ -226,31 +226,35 @@ static OidcCallback getGcpCallback(final MongoCredential credential) { } @Override - public void reauthenticate(final InternalConnection connection) { + public void reauthenticate(final InternalConnection connection, final OperationContext operationContext) { assertTrue(connection.opened()); - authenticationLoop(connection, connection.getDescription()); + authenticationLoop(connection, connection.getDescription(), operationContext); } @Override - public void reauthenticateAsync(final InternalConnection connection, final SingleResultCallback callback) { + public void reauthenticateAsync(final InternalConnection connection, + final OperationContext operationContext, + final SingleResultCallback callback) { beginAsync().thenRun(c -> { assertTrue(connection.opened()); - authenticationLoopAsync(connection, connection.getDescription(), c); + authenticationLoopAsync(connection, connection.getDescription(), operationContext, c); }).finish(callback); } @Override - public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) { + public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { assertFalse(connection.opened()); - authenticationLoop(connection, connectionDescription); + authenticationLoop(connection, connectionDescription, operationContext); } @Override void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext, final SingleResultCallback callback) { beginAsync().thenRun(c -> { assertFalse(connection.opened()); - authenticationLoopAsync(connection, connectionDescription, c); + authenticationLoopAsync(connection, connectionDescription, operationContext, c); }).finish(callback); } @@ -266,11 +270,12 @@ private static boolean triggersRetry(@Nullable final Throwable t) { return false; } - private void authenticationLoop(final InternalConnection connection, final ConnectionDescription description) { + private void authenticationLoop(final InternalConnection connection, final ConnectionDescription description, + final OperationContext operationContext) { fallbackState = FallbackState.INITIAL; while (true) { try { - super.authenticate(connection, description); + super.authenticate(connection, description, operationContext); break; } catch (Exception e) { if (triggersRetry(e) && shouldRetryHandler()) { @@ -282,10 +287,12 @@ private void authenticationLoop(final InternalConnection connection, final Conne } 
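// Illustrative sketch only (not part of this patch): the authenticationLoop above retries the
// underlying authenticate call while the failure is retryable (triggersRetry) and the callback
// state machine allows another attempt (shouldRetryHandler). Below is a minimal standalone
// version of that control flow with hypothetical names and a single retry predicate.
import java.util.function.Predicate;

final class RetryLoop {
    interface Attempt {
        void run() throws Exception;
    }

    // Keeps invoking the attempt until it succeeds or the predicate rejects the failure.
    static void runWithRetry(final Attempt attempt, final Predicate<Exception> shouldRetry) throws Exception {
        while (true) {
            try {
                attempt.run();
                return;                  // success: leave the loop
            } catch (Exception e) {
                if (!shouldRetry.test(e)) {
                    throw e;             // non-retryable: surface the original failure
                }
                // retryable: fall through and attempt again
            }
        }
    }
}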
private void authenticationLoopAsync(final InternalConnection connection, final ConnectionDescription description, + final OperationContext operationContext, final SingleResultCallback callback) { fallbackState = FallbackState.INITIAL; beginAsync().thenRunRetryingWhile( - c -> super.authenticateAsync(connection, description, c), + operationContext.getTimeoutContext(), + c -> super.authenticateAsync(connection, description, operationContext, c), e -> triggersRetry(e) && shouldRetryHandler() ).finish(callback); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java index 683f6adfbf8..bf29ebc051b 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java +++ b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java @@ -16,10 +16,17 @@ package com.mongodb.internal.connection; import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.RequestContext; import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ClusterType; import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import com.mongodb.selector.ServerSelector; @@ -27,6 +34,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; + import static java.util.stream.Collectors.toList; /** @@ -36,16 +44,93 @@ public class OperationContext { private static final AtomicLong NEXT_ID = new AtomicLong(0); private final long id; private final ServerDeprioritization serverDeprioritization; + private final SessionContext sessionContext; + private final RequestContext requestContext; + private final TimeoutContext timeoutContext; + @Nullable + private final ServerApi serverApi; + + public OperationContext(final RequestContext requestContext, final SessionContext sessionContext, final TimeoutContext timeoutContext, + @Nullable final ServerApi serverApi) { + this(NEXT_ID.incrementAndGet(), requestContext, sessionContext, timeoutContext, new ServerDeprioritization(), serverApi); + } + + public static OperationContext simpleOperationContext( + final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) { + return new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + new TimeoutContext(timeoutSettings), + serverApi); + } + + public static OperationContext simpleOperationContext(final TimeoutContext timeoutContext) { + return new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + timeoutContext, + null); + } + + public OperationContext withSessionContext(final SessionContext sessionContext) { + return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi); + } - public OperationContext() { - id = NEXT_ID.incrementAndGet(); - serverDeprioritization = new ServerDeprioritization(); + public OperationContext withTimeoutContext(final TimeoutContext timeoutContext) { + return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi); } public long getId() { return id; } + public SessionContext 
getSessionContext() { + return sessionContext; + } + + public RequestContext getRequestContext() { + return requestContext; + } + + public TimeoutContext getTimeoutContext() { + return timeoutContext; + } + + @Nullable + public ServerApi getServerApi() { + return serverApi; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public OperationContext(final long id, + final RequestContext requestContext, + final SessionContext sessionContext, + final TimeoutContext timeoutContext, + final ServerDeprioritization serverDeprioritization, + @Nullable final ServerApi serverApi) { + this.id = id; + this.serverDeprioritization = serverDeprioritization; + this.requestContext = requestContext; + this.sessionContext = sessionContext; + this.timeoutContext = timeoutContext; + this.serverApi = serverApi; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public OperationContext(final long id, + final RequestContext requestContext, + final SessionContext sessionContext, + final TimeoutContext timeoutContext, + @Nullable final ServerApi serverApi) { + this.id = id; + this.serverDeprioritization = new ServerDeprioritization(); + this.requestContext = requestContext; + this.sessionContext = sessionContext; + this.timeoutContext = timeoutContext; + this.serverApi = serverApi; + } + + /** * @return The same {@link ServerDeprioritization} if called on the same {@link OperationContext}. */ @@ -114,3 +199,4 @@ private boolean isEnabled(final ClusterType clusterType) { } } } + diff --git a/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java index 23287362502..c6ad5f451a0 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java @@ -26,11 +26,13 @@ import com.mongodb.RequestContext; import com.mongodb.ServerAddress; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; import com.mongodb.event.CommandFailedEvent; import com.mongodb.event.CommandListener; import com.mongodb.event.CommandStartedEvent; import com.mongodb.event.CommandSucceededEvent; import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.lang.Nullable; @@ -83,12 +85,14 @@ static boolean isCommandOk(final ResponseBuffers responseBuffers) { } @Nullable - static MongoException createSpecialWriteConcernException(final ResponseBuffers responseBuffers, final ServerAddress serverAddress) { + static MongoException createSpecialWriteConcernException(final ResponseBuffers responseBuffers, + final ServerAddress serverAddress, + final TimeoutContext timeoutContext) { BsonValue writeConcernError = getField(createBsonReader(responseBuffers), "writeConcernError"); if (writeConcernError == null) { return null; } else { - return createSpecialException(writeConcernError.asDocument(), serverAddress, "errmsg"); + return createSpecialException(writeConcernError.asDocument(), serverAddress, "errmsg", timeoutContext); } } @@ -197,8 +201,9 @@ private static boolean isCommandOk(@Nullable final BsonValue okValue) { } } - static MongoException getCommandFailureException(final BsonDocument response, final ServerAddress serverAddress) { - MongoException specialException = 
createSpecialException(response, serverAddress, "errmsg"); + static MongoException getCommandFailureException(final BsonDocument response, final ServerAddress serverAddress, + final TimeoutContext timeoutContext) { + MongoException specialException = createSpecialException(response, serverAddress, "errmsg", timeoutContext); if (specialException != null) { return specialException; } @@ -213,15 +218,16 @@ static String getErrorMessage(final BsonDocument response, final String errorMes return response.getString(errorMessageFieldName, new BsonString("")).getValue(); } - static MongoException getQueryFailureException(final BsonDocument errorDocument, final ServerAddress serverAddress) { - MongoException specialException = createSpecialException(errorDocument, serverAddress, "$err"); + static MongoException getQueryFailureException(final BsonDocument errorDocument, final ServerAddress serverAddress, + final TimeoutContext timeoutContext) { + MongoException specialException = createSpecialException(errorDocument, serverAddress, "$err", timeoutContext); if (specialException != null) { return specialException; } return new MongoQueryException(errorDocument, serverAddress); } - static MessageSettings getMessageSettings(final ConnectionDescription connectionDescription) { + static MessageSettings getMessageSettings(final ConnectionDescription connectionDescription, final ServerDescription serverDescription) { return MessageSettings.builder() .maxDocumentSize(connectionDescription.getMaxDocumentSize()) .maxMessageSize(connectionDescription.getMaxMessageSize()) @@ -229,6 +235,7 @@ static MessageSettings getMessageSettings(final ConnectionDescription connection .maxWireVersion(connectionDescription.getMaxWireVersion()) .serverType(connectionDescription.getServerType()) .sessionSupported(connectionDescription.getLogicalSessionTimeoutMinutes() != null) + .cryptd(serverDescription.isCryptd()) .build(); } @@ -238,22 +245,28 @@ static MessageSettings getMessageSettings(final ConnectionDescription connection private static final List RECOVERING_MESSAGES = asList("not master or secondary", "node is recovering"); @Nullable - public static MongoException createSpecialException(@Nullable final BsonDocument response, final ServerAddress serverAddress, - final String errorMessageFieldName) { + public static MongoException createSpecialException(@Nullable final BsonDocument response, + final ServerAddress serverAddress, + final String errorMessageFieldName, + final TimeoutContext timeoutContext) { if (response == null) { return null; } int errorCode = getErrorCode(response); String errorMessage = getErrorMessage(response, errorMessageFieldName); if (ErrorCategory.fromErrorCode(errorCode) == ErrorCategory.EXECUTION_TIMEOUT) { - return new MongoExecutionTimeoutException(errorCode, errorMessage, response); + MongoExecutionTimeoutException mongoExecutionTimeoutException = new MongoExecutionTimeoutException(errorCode, errorMessage, response); + if (timeoutContext.hasTimeoutMS()) { + return TimeoutContext.createMongoTimeoutException(mongoExecutionTimeoutException); + } + return mongoExecutionTimeoutException; } else if (isNodeIsRecoveringError(errorCode, errorMessage)) { return new MongoNodeIsRecoveringException(response, serverAddress); } else if (isNotPrimaryError(errorCode, errorMessage)) { return new MongoNotPrimaryException(response, serverAddress); } else if (response.containsKey("writeConcernError")) { MongoException writeConcernException = createSpecialException(response.getDocument("writeConcernError"), 
serverAddress, - "errmsg"); + "errmsg", timeoutContext); if (writeConcernException != null && response.isArray("errorLabels")) { for (BsonValue errorLabel : response.getArray("errorLabels")) { writeConcernException.addLabel(errorLabel.asString().getValue()); @@ -277,11 +290,11 @@ private static boolean isNodeIsRecoveringError(final int errorCode, final String static void sendCommandStartedEvent(final RequestMessage message, final String databaseName, final String commandName, final BsonDocument command, final ConnectionDescription connectionDescription, - final CommandListener commandListener, final RequestContext requestContext, final OperationContext operationContext) { - notNull("requestContext", requestContext); + final CommandListener commandListener, final OperationContext operationContext) { + notNull("operationContext", operationContext); try { - commandListener.commandStarted(new CommandStartedEvent(getRequestContextForEvent(requestContext), operationContext.getId(), message.getId(), - connectionDescription, databaseName, commandName, command)); + commandListener.commandStarted(new CommandStartedEvent(getRequestContextForEvent(operationContext.getRequestContext()), + operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, command)); } catch (Exception e) { if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) { PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command started event to listener %s", commandListener), e); @@ -289,12 +302,13 @@ static void sendCommandStartedEvent(final RequestMessage message, final String d } } - static void sendCommandSucceededEvent(final RequestMessage message, final String databaseName, final String commandName, + static void sendCommandSucceededEvent(final RequestMessage message, final String commandName, final String databaseName, final BsonDocument response, final ConnectionDescription connectionDescription, final long elapsedTimeNanos, - final CommandListener commandListener, final RequestContext requestContext, final OperationContext operationContext) { - notNull("requestContext", requestContext); + final CommandListener commandListener, final OperationContext operationContext) { + notNull("operationContext", operationContext); try { - commandListener.commandSucceeded(new CommandSucceededEvent(getRequestContextForEvent(requestContext), + + commandListener.commandSucceeded(new CommandSucceededEvent(getRequestContextForEvent(operationContext.getRequestContext()), operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, response, elapsedTimeNanos)); } catch (Exception e) { @@ -304,15 +318,15 @@ static void sendCommandSucceededEvent(final RequestMessage message, final String } } - static void sendCommandFailedEvent(final RequestMessage message, final String databaseName, final String commandName, + static void sendCommandFailedEvent(final RequestMessage message, final String commandName, final String databaseName, final ConnectionDescription connectionDescription, final long elapsedTimeNanos, - final Throwable throwable, final CommandListener commandListener, final RequestContext requestContext, - final OperationContext operationContext) { - notNull("requestContext", requestContext); + final Throwable throwable, final CommandListener commandListener, final OperationContext operationContext) { + notNull("operationContext", operationContext); try { - commandListener.commandFailed(new CommandFailedEvent(getRequestContextForEvent(requestContext), + 
commandListener.commandFailed(new CommandFailedEvent(getRequestContextForEvent(operationContext.getRequestContext()), operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, elapsedTimeNanos, throwable)); + } catch (Exception e) { if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) { PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command failed event to listener %s", commandListener), e); diff --git a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java index f170cafdb00..86e2ebd1dbe 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java @@ -16,7 +16,6 @@ package com.mongodb.internal.connection; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonBinaryWriter; import org.bson.BsonBinaryWriterSettings; @@ -127,13 +126,13 @@ public MessageSettings getSettings() { * Encoded the message to the given output. * * @param bsonOutput the output - * @param sessionContext the session context + * @param operationContext the session context */ - public void encode(final BsonOutput bsonOutput, final SessionContext sessionContext) { - notNull("sessionContext", sessionContext); + public void encode(final BsonOutput bsonOutput, final OperationContext operationContext) { + notNull("operationContext", operationContext); int messageStartPosition = bsonOutput.getPosition(); writeMessagePrologue(bsonOutput); - EncodingMetadata encodingMetadata = encodeMessageBodyWithMetadata(bsonOutput, sessionContext); + EncodingMetadata encodingMetadata = encodeMessageBodyWithMetadata(bsonOutput, operationContext); backpatchMessageLength(messageStartPosition, bsonOutput); this.encodingMetadata = encodingMetadata; } @@ -163,10 +162,10 @@ protected void writeMessagePrologue(final BsonOutput bsonOutput) { * Encode the message body to the given output. * * @param bsonOutput the output - * @param sessionContext the session context + * @param operationContext the session context * @return the encoding metadata */ - protected abstract EncodingMetadata encodeMessageBodyWithMetadata(BsonOutput bsonOutput, SessionContext sessionContext); + protected abstract EncodingMetadata encodeMessageBodyWithMetadata(BsonOutput bsonOutput, OperationContext operationContext); protected void addDocument(final BsonDocument document, final BsonOutput bsonOutput, final FieldNameValidator validator, @Nullable final List extraElements) { diff --git a/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java b/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java new file mode 100644 index 00000000000..ffba2caecc4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; + +import java.util.Deque; +import java.util.concurrent.ConcurrentLinkedDeque; + +final class RoundTripTimeSampler { + private final ExponentiallyWeightedMovingAverage averageRoundTripTime = new ExponentiallyWeightedMovingAverage(0.2); + private final RecentSamples recentSamples = new RecentSamples(); + + void reset() { + averageRoundTripTime.reset(); + recentSamples.reset(); + } + + void addSample(final long sample) { + recentSamples.add(sample); + averageRoundTripTime.addSample(sample); + } + + long getAverage() { + return averageRoundTripTime.getAverage(); + } + + long getMin() { + return recentSamples.min(); + } + + @ThreadSafe + private static final class RecentSamples { + + private static final int MAX_SIZE = 10; + private final Deque samples; + + RecentSamples() { + samples = new ConcurrentLinkedDeque<>(); + } + + void add(final long sample) { + if (samples.size() == MAX_SIZE) { + samples.removeFirst(); + } + samples.add(sample); + } + + void reset() { + samples.clear(); + } + + long min() { + // Clients MUST report the minimum RTT as 0 until at least 2 samples have been gathered + return samples.size() < 2 ? 0 : samples.stream().min(Long::compareTo).orElse(0L); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java index 6e4bea55514..900d9a14e16 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java @@ -20,6 +20,7 @@ import com.mongodb.MongoCredential; import com.mongodb.MongoException; import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSecurityException; import com.mongodb.ServerAddress; import com.mongodb.ServerApi; @@ -61,13 +62,13 @@ abstract class SaslAuthenticator extends Authenticator implements SpeculativeAut super(credential, clusterConnectionMode, serverApi); } - @Override - public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) { + public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { doAsSubject(() -> { SaslClient saslClient = createSaslClient(connection.getDescription().getServerAddress()); throwIfSaslClientIsNull(saslClient); try { - BsonDocument responseDocument = getNextSaslResponse(saslClient, connection); + BsonDocument responseDocument = getNextSaslResponse(saslClient, connection, operationContext); BsonInt32 conversationId = responseDocument.getInt32("conversationId"); while (!(responseDocument.getBoolean("done")).getValue()) { @@ -79,7 +80,8 @@ public void authenticate(final InternalConnection connection, final ConnectionDe + getMongoCredential()); } - responseDocument = sendSaslContinue(conversationId, response, connection); + responseDocument = sendSaslContinue(conversationId, response, connection, operationContext); + operationContext.getTimeoutContext().resetMaintenanceTimeout(); } if (!saslClient.isComplete()) { saslClient.evaluateChallenge((responseDocument.getBinary("payload")).getData()); @@ -100,12 +102,12 @@ public void authenticate(final InternalConnection connection, final ConnectionDe 
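The RoundTripTimeSampler introduced above combines an exponentially weighted moving average (alpha 0.2) with the minimum of the last ten samples, and reports a minimum of 0 until at least two samples have been gathered (the in-class comment quotes that requirement from the driver specifications). A minimal usage sketch follows, assuming same-package access (the class is package-private) and a hypothetical wrapper class name; it uses only the methods visible in the patch (addSample, getMin, getAverage):

package com.mongodb.internal.connection;

import java.util.concurrent.TimeUnit;

final class RoundTripTimeSamplerSketch {
    public static void main(final String[] args) {
        RoundTripTimeSampler sampler = new RoundTripTimeSampler();

        sampler.addSample(TimeUnit.MILLISECONDS.toNanos(12));
        // Only one sample gathered so far, so the minimum is reported as 0
        System.out.println(sampler.getMin());

        sampler.addSample(TimeUnit.MILLISECONDS.toNanos(8));
        // Minimum of the most recent samples (a bounded deque of at most 10)
        System.out.println(sampler.getMin());
        // Exponentially weighted moving average over all samples since the last reset()
        System.out.println(sampler.getAverage());
    }
}

A conservative minimum like this is what a timeout budget can safely subtract, while the moving average presumably continues to serve latency-window server selection, as it did before this patch.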
@Override void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { try { doAsSubject(() -> { SaslClient saslClient = createSaslClient(connection.getDescription().getServerAddress()); throwIfSaslClientIsNull(saslClient); - getNextSaslResponseAsync(saslClient, connection, callback); + getNextSaslResponseAsync(saslClient, connection, operationContext, callback); return null; }); } catch (Throwable t) { @@ -127,7 +129,8 @@ private void throwIfSaslClientIsNull(@Nullable final SaslClient saslClient) { } } - private BsonDocument getNextSaslResponse(final SaslClient saslClient, final InternalConnection connection) { + private BsonDocument getNextSaslResponse(final SaslClient saslClient, final InternalConnection connection, + final OperationContext operationContext) { BsonDocument response = connection.opened() ? null : getSpeculativeAuthenticateResponse(); if (response != null) { return response; @@ -135,20 +138,20 @@ private BsonDocument getNextSaslResponse(final SaslClient saslClient, final Inte try { byte[] serverResponse = saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null; - return sendSaslStart(serverResponse, connection); + return sendSaslStart(serverResponse, connection, operationContext); } catch (Exception e) { throw wrapException(e); } } private void getNextSaslResponseAsync(final SaslClient saslClient, final InternalConnection connection, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); try { BsonDocument response = connection.opened() ? null : getSpeculativeAuthenticateResponse(); if (response == null) { byte[] serverResponse = (saslClient.hasInitialResponse() ? 
saslClient.evaluateChallenge(new byte[0]) : null); - sendSaslStartAsync(serverResponse, connection, (result, t) -> { + sendSaslStartAsync(serverResponse, connection, operationContext, (result, t) -> { if (t != null) { errHandlingCallback.onResult(null, wrapException(t)); return; @@ -157,13 +160,13 @@ private void getNextSaslResponseAsync(final SaslClient saslClient, final Interna if (result.getBoolean("done").getValue()) { verifySaslClientComplete(saslClient, result, errHandlingCallback); } else { - new Continuator(saslClient, result, connection, errHandlingCallback).start(); + new Continuator(saslClient, result, connection, operationContext, errHandlingCallback).start(); } }); } else if (response.getBoolean("done").getValue()) { verifySaslClientComplete(saslClient, response, errHandlingCallback); } else { - new Continuator(saslClient, response, connection, errHandlingCallback).start(); + new Continuator(saslClient, response, connection, operationContext, errHandlingCallback).start(); } } catch (Exception e) { callback.onResult(null, wrapException(e)); @@ -225,29 +228,47 @@ protected SubjectProvider getDefaultSubjectProvider() { return () -> null; } - private BsonDocument sendSaslStart(@Nullable final byte[] outToken, final InternalConnection connection) { + private BsonDocument sendSaslStart(@Nullable final byte[] outToken, final InternalConnection connection, + final OperationContext operationContext) { BsonDocument startDocument = createSaslStartCommandDocument(outToken); appendSaslStartOptions(startDocument); - return executeCommand(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection); + try { + return executeCommand(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection, + operationContext); + } finally { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + } } - private BsonDocument sendSaslContinue(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection) { - return executeCommand(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken), - getClusterConnectionMode(), getServerApi(), connection); + private BsonDocument sendSaslContinue(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection, + final OperationContext operationContext) { + try { + return executeCommand(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken), + getClusterConnectionMode(), getServerApi(), connection, operationContext); + } finally { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + } } private void sendSaslStartAsync(@Nullable final byte[] outToken, final InternalConnection connection, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { BsonDocument startDocument = createSaslStartCommandDocument(outToken); appendSaslStartOptions(startDocument); + executeCommandAsync(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection, - callback); + operationContext, (r, t) -> { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + callback.onResult(r, t); + }); } private void sendSaslContinueAsync(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { 
executeCommandAsync(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken), - getClusterConnectionMode(), getServerApi(), connection, callback); + getClusterConnectionMode(), getServerApi(), connection, operationContext, (r, t) -> { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + callback.onResult(r, t); + }); } protected BsonDocument createSaslStartCommandDocument(@Nullable final byte[] outToken) { @@ -271,6 +292,8 @@ private void disposeOfSaslClient(final SaslClient saslClient) { protected MongoException wrapException(final Throwable t) { if (t instanceof MongoInterruptedException) { return (MongoInterruptedException) t; + } else if (t instanceof MongoOperationTimeoutException) { + return (MongoOperationTimeoutException) t; } else if (t instanceof MongoSecurityException) { return (MongoSecurityException) t; } else { @@ -300,13 +323,15 @@ private final class Continuator implements SingleResultCallback { private final SaslClient saslClient; private final BsonDocument saslStartDocument; private final InternalConnection connection; + private final OperationContext operationContext; private final SingleResultCallback callback; Continuator(final SaslClient saslClient, final BsonDocument saslStartDocument, final InternalConnection connection, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { this.saslClient = saslClient; this.saslStartDocument = saslStartDocument; this.connection = connection; + this.operationContext = operationContext; this.callback = callback; } @@ -335,13 +360,13 @@ private void continueConversation(final BsonDocument result) { doAsSubject(() -> { try { sendSaslContinueAsync(saslStartDocument.getInt32("conversationId"), - saslClient.evaluateChallenge((result.getBinary("payload")).getData()), connection, Continuator.this); + saslClient.evaluateChallenge((result.getBinary("payload")).getData()), connection, + operationContext, Continuator.this); } catch (SaslException e) { throw wrapException(e); } return null; }); - } catch (Throwable t) { callback.onResult(null, t); disposeOfSaslClient(saslClient); diff --git a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java index 3c9d3b126bf..daeb67be54d 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java @@ -24,9 +24,11 @@ import com.mongodb.connection.ClusterType; import com.mongodb.connection.ServerDescription; import com.mongodb.connection.ServerType; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; -import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.time.Timeout; import java.util.concurrent.atomic.AtomicReference; @@ -68,7 +70,9 @@ protected void connect() { } @Override - public ServersSnapshot getServersSnapshot() { + public ServersSnapshot getServersSnapshot( + final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { isTrue("open", !isClosed()); ClusterableServer server = assertNotNull(this.server.get()); return serverAddress -> server; diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java 
b/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java index 7ee08fd967c..a1c3ed0d914 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java @@ -38,15 +38,14 @@ import java.net.SocketTimeoutException; import java.util.Iterator; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.throwMongoTimeoutException; import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses; import static com.mongodb.internal.connection.SocketStreamHelper.configureSocket; import static com.mongodb.internal.connection.SslHelper.configureSslSocket; import static com.mongodb.internal.thread.InterruptionUtil.translateInterruptedException; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      @@ -75,9 +74,9 @@ public SocketStream(final ServerAddress address, final InetAddressResolver inetA } @Override - public void open() { + public void open(final OperationContext operationContext) { try { - socket = initializeSocket(); + socket = initializeSocket(operationContext); outputStream = socket.getOutputStream(); inputStream = socket.getInputStream(); } catch (IOException e) { @@ -87,22 +86,22 @@ public void open() { } } - protected Socket initializeSocket() throws IOException { + protected Socket initializeSocket(final OperationContext operationContext) throws IOException { ProxySettings proxySettings = settings.getProxySettings(); if (proxySettings.isProxyEnabled()) { if (sslSettings.isEnabled()) { assertTrue(socketFactory instanceof SSLSocketFactory); SSLSocketFactory sslSocketFactory = (SSLSocketFactory) socketFactory; - return initializeSslSocketOverSocksProxy(sslSocketFactory); + return initializeSslSocketOverSocksProxy(operationContext, sslSocketFactory); } - return initializeSocketOverSocksProxy(); + return initializeSocketOverSocksProxy(operationContext); } Iterator inetSocketAddresses = getSocketAddresses(address, inetAddressResolver).iterator(); while (inetSocketAddresses.hasNext()) { Socket socket = socketFactory.createSocket(); try { - SocketStreamHelper.initialize(socket, inetSocketAddresses.next(), settings, sslSettings); + SocketStreamHelper.initialize(operationContext, socket, inetSocketAddresses.next(), settings, sslSettings); return socket; } catch (SocketTimeoutException e) { if (!inetSocketAddresses.hasNext()) { @@ -114,14 +113,15 @@ protected Socket initializeSocket() throws IOException { throw new MongoSocketException("Exception opening socket", getAddress()); } - private SSLSocket initializeSslSocketOverSocksProxy(final SSLSocketFactory sslSocketFactory) throws IOException { + private SSLSocket initializeSslSocketOverSocksProxy(final OperationContext operationContext, + final SSLSocketFactory sslSocketFactory) throws IOException { final String serverHost = address.getHost(); final int serverPort = address.getPort(); SocksSocket socksProxy = new SocksSocket(settings.getProxySettings()); - configureSocket(socksProxy, settings); + configureSocket(socksProxy, operationContext, settings); InetSocketAddress inetSocketAddress = toSocketAddress(serverHost, serverPort); - socksProxy.connect(inetSocketAddress, settings.getConnectTimeout(MILLISECONDS)); + socksProxy.connect(inetSocketAddress, operationContext.getTimeoutContext().getConnectTimeoutMs()); SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socksProxy, serverHost, serverPort, true); //Even though Socks proxy connection is already established, TLS handshake has not been performed yet. @@ -139,9 +139,9 @@ private static InetSocketAddress toSocketAddress(final String serverHost, final return InetSocketAddress.createUnresolved(serverHost, serverPort); } - private Socket initializeSocketOverSocksProxy() throws IOException { + private Socket initializeSocketOverSocksProxy(final OperationContext operationContext) throws IOException { Socket createdSocket = socketFactory.createSocket(); - configureSocket(createdSocket, settings); + configureSocket(createdSocket, operationContext, settings); /* Wrap the configured socket with SocksSocket to add extra functionality. 
Reason for separate steps: We can't directly extend Java 11 methods within 'SocksSocket' @@ -150,7 +150,7 @@ private Socket initializeSocketOverSocksProxy() throws IOException { SocksSocket socksProxy = new SocksSocket(createdSocket, settings.getProxySettings()); socksProxy.connect(toSocketAddress(address.getHost(), address.getPort()), - settings.getConnectTimeout(TimeUnit.MILLISECONDS)); + operationContext.getTimeoutContext().getConnectTimeoutMs()); return socksProxy; } @@ -160,60 +160,58 @@ public ByteBuf getBuffer(final int size) { } @Override - public void write(final List buffers) throws IOException { + public void write(final List buffers, final OperationContext operationContext) throws IOException { for (final ByteBuf cur : buffers) { outputStream.write(cur.array(), 0, cur.limit()); + operationContext.getTimeoutContext().onExpired(() -> { + throwMongoTimeoutException("Socket write exceeded the timeout limit."); + }); } } @Override - public ByteBuf read(final int numBytes) throws IOException { - ByteBuf buffer = bufferProvider.getBuffer(numBytes); + public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException { try { - int totalBytesRead = 0; - byte[] bytes = buffer.array(); - while (totalBytesRead < buffer.limit()) { - int bytesRead = inputStream.read(bytes, totalBytesRead, buffer.limit() - totalBytesRead); - if (bytesRead == -1) { - throw new MongoSocketReadException("Prematurely reached end of stream", getAddress()); + ByteBuf buffer = bufferProvider.getBuffer(numBytes); + try { + int totalBytesRead = 0; + byte[] bytes = buffer.array(); + while (totalBytesRead < buffer.limit()) { + int readTimeoutMS = (int) operationContext.getTimeoutContext().getReadTimeoutMS(); + socket.setSoTimeout(readTimeoutMS); + int bytesRead = inputStream.read(bytes, totalBytesRead, buffer.limit() - totalBytesRead); + if (bytesRead == -1) { + throw new MongoSocketReadException("Prematurely reached end of stream", getAddress()); + } + totalBytesRead += bytesRead; } - totalBytesRead += bytesRead; + return buffer; + } catch (Exception e) { + buffer.release(); + throw e; } - return buffer; - } catch (Exception e) { - buffer.release(); - throw e; - } - } - - @Override - public ByteBuf read(final int numBytes, final int additionalTimeout) throws IOException { - int curTimeout = socket.getSoTimeout(); - if (curTimeout > 0 && additionalTimeout > 0) { - socket.setSoTimeout(curTimeout + additionalTimeout); - } - try { - return read(numBytes); } finally { if (!socket.isClosed()) { // `socket` may be closed if the current thread is virtual, and it is interrupted while reading - socket.setSoTimeout(curTimeout); + socket.setSoTimeout(0); } } } @Override - public void openAsync(final AsyncCompletionHandler handler) { + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations."); } @Override - public void writeAsync(final List buffers, final AsyncCompletionHandler handler) { + public void writeAsync(final List buffers, final OperationContext operationContext, + final AsyncCompletionHandler handler) { throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations."); } @Override - public void readAsync(final int numBytes, final AsyncCompletionHandler handler) { + public void readAsync(final int numBytes, final OperationContext operationContext, + final AsyncCompletionHandler handler) { throw new 
UnsupportedOperationException(getClass() + " does not support asynchronous operations."); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java index 1b5e789e646..74098c4ede6 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java @@ -28,7 +28,6 @@ import java.net.SocketOption; import static com.mongodb.internal.connection.SslHelper.configureSslSocket; -import static java.util.concurrent.TimeUnit.MILLISECONDS; @SuppressWarnings({"unchecked", "rawtypes"}) final class SocketStreamHelper { @@ -69,17 +68,21 @@ final class SocketStreamHelper { SET_OPTION_METHOD = setOptionMethod; } - static void initialize(final Socket socket, final InetSocketAddress inetSocketAddress, final SocketSettings settings, - final SslSettings sslSettings) throws IOException { - configureSocket(socket, settings); + static void initialize(final OperationContext operationContext, final Socket socket, + final InetSocketAddress inetSocketAddress, final SocketSettings settings, + final SslSettings sslSettings) throws IOException { + configureSocket(socket, operationContext, settings); configureSslSocket(socket, sslSettings, inetSocketAddress); - socket.connect(inetSocketAddress, settings.getConnectTimeout(MILLISECONDS)); + socket.connect(inetSocketAddress, operationContext.getTimeoutContext().getConnectTimeoutMs()); } - static void configureSocket(final Socket socket, final SocketSettings settings) throws SocketException { + static void configureSocket(final Socket socket, final OperationContext operationContext, final SocketSettings settings) throws SocketException { socket.setTcpNoDelay(true); - socket.setSoTimeout(settings.getReadTimeout(MILLISECONDS)); socket.setKeepAlive(true); + int readTimeoutMS = (int) operationContext.getTimeoutContext().getReadTimeoutMS(); + if (readTimeoutMS > 0) { + socket.setSoTimeout(readTimeoutMS); + } // Adding keep alive options for users of Java 11+. These options will be ignored for older Java versions. setExtendedSocketOptions(socket); diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java b/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java index 3b4eac7b48e..8a0152c9423 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java @@ -32,7 +32,6 @@ import java.nio.channels.SocketChannel; import java.nio.charset.StandardCharsets; import java.util.Arrays; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertFalse; import static com.mongodb.assertions.Assertions.assertNotNull; @@ -44,6 +43,8 @@ import static com.mongodb.internal.connection.SocksSocket.AddressType.IP_V4; import static com.mongodb.internal.connection.SocksSocket.AddressType.IP_V6; import static com.mongodb.internal.connection.SocksSocket.ServerReply.REPLY_SUCCEEDED; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      @@ -84,17 +85,18 @@ public void connect(final SocketAddress endpoint, final int timeoutMs) throws IO // `Socket` requires `IllegalArgumentException` isTrueArgument("timeoutMs", timeoutMs >= 0); try { - Timeout timeout = toTimeout(timeoutMs); + Timeout timeout = Timeout.expiresIn(timeoutMs, MILLISECONDS, ZERO_DURATION_MEANS_INFINITE); InetSocketAddress unresolvedAddress = (InetSocketAddress) endpoint; assertTrue(unresolvedAddress.isUnresolved()); this.remoteAddress = unresolvedAddress; InetSocketAddress proxyAddress = new InetSocketAddress(assertNotNull(proxySettings.getHost()), proxySettings.getPort()); - if (socket != null) { - socket.connect(proxyAddress, remainingMillis(timeout)); - } else { - super.connect(proxyAddress, remainingMillis(timeout)); - } + + timeout.checkedRun(MILLISECONDS, + () -> socketConnect(proxyAddress, 0), + (ms) -> socketConnect(proxyAddress, Math.toIntExact(ms)), + () -> throwSocketConnectionTimeout()); + SocksAuthenticationMethod authenticationMethod = performNegotiation(timeout); authenticate(authenticationMethod, timeout); sendConnect(timeout); @@ -114,6 +116,14 @@ public void connect(final SocketAddress endpoint, final int timeoutMs) throws IO } } + private void socketConnect(final InetSocketAddress proxyAddress, final int rem) throws IOException { + if (socket != null) { + socket.connect(proxyAddress, rem); + } else { + super.connect(proxyAddress, rem); + } + } + private void sendConnect(final Timeout timeout) throws IOException { final String host = remoteAddress.getHostName(); final int port = remoteAddress.getPort(); @@ -292,26 +302,6 @@ private SocksAuthenticationMethod[] getSocksAuthenticationMethods() { return authMethods; } - private static Timeout toTimeout(final int timeoutMs) { - if (timeoutMs == 0) { - return Timeout.infinite(); - } - return Timeout.startNow(timeoutMs, TimeUnit.MILLISECONDS); - } - - private static int remainingMillis(final Timeout timeout) throws IOException { - if (timeout.isInfinite()) { - return 0; - } - - final int remaining = Math.toIntExact(timeout.remaining(TimeUnit.MILLISECONDS)); - if (remaining > 0) { - return remaining; - } - - throw new SocketTimeoutException("Socket connection timed out"); - } - private byte[] readSocksReply(final int length, final Timeout timeout) throws IOException { InputStream inputStream = getInputStream(); byte[] data = new byte[length]; @@ -320,8 +310,14 @@ private byte[] readSocksReply(final int length, final Timeout timeout) throws IO try { while (received < length) { int count; - int remaining = remainingMillis(timeout); - setSoTimeout(remaining); + timeout.checkedRun(MILLISECONDS, () -> { + setSoTimeout(0); + }, (remainingMs) -> { + setSoTimeout(Math.toIntExact(remainingMs)); + }, () -> { + throwSocketConnectionTimeout(); + }); + count = inputStream.read(data, received, length - received); if (count < 0) { throw new ConnectException("Malformed reply from SOCKS proxy server"); @@ -334,6 +330,10 @@ private byte[] readSocksReply(final int length, final Timeout timeout) throws IO return data; } + private static void throwSocketConnectionTimeout() throws SocketTimeoutException { + throw new SocketTimeoutException("Socket connection timed out"); + } + enum SocksCommand { CONNECT(0x01); diff --git a/driver-core/src/main/com/mongodb/internal/connection/Stream.java b/driver-core/src/main/com/mongodb/internal/connection/Stream.java index b26074d218f..317927f1715 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/Stream.java +++ 
b/driver-core/src/main/com/mongodb/internal/connection/Stream.java @@ -31,45 +31,38 @@ public interface Stream extends BufferProvider { /** * Open the stream. * + * @param operationContext the operation context * @throws IOException if an I/O error occurs */ - void open() throws IOException; + void open(OperationContext operationContext) throws IOException; /** * Open the stream asynchronously. * - * @param handler the completion handler for opening the stream + * @param operationContext the operation context + * @param handler the completion handler for opening the stream */ - void openAsync(AsyncCompletionHandler handler); + void openAsync(OperationContext operationContext, AsyncCompletionHandler handler); /** * Write each buffer in the list to the stream in order, blocking until all are completely written. * * @param buffers the buffers to write. The operation must not {@linkplain ByteBuf#release() release} any buffer from {@code buffers}, * unless the operation {@linkplain ByteBuf#retain() retains} it, and releasing is meant to compensate for that. + * @param operationContext the operation context * @throws IOException if there are problems writing to the stream */ - void write(List buffers) throws IOException; + void write(List buffers, OperationContext operationContext) throws IOException; /** * Read from the stream, blocking until the requested number of bytes have been read. * * @param numBytes The number of bytes to read into the returned byte buffer + * @param operationContext the operation context * @return a byte buffer filled with number of bytes requested * @throws IOException if there are problems reading from the stream */ - ByteBuf read(int numBytes) throws IOException; - - /** - * Read from the stream, blocking until the requested number of bytes have been read. If supported by the implementation, - * adds the given additional timeout to the configured timeout for the stream. - * - * @param numBytes The number of bytes to read into the returned byte buffer - * @param additionalTimeout additional timeout in milliseconds to add to the configured timeout - * @return a byte buffer filled with number of bytes requested - * @throws IOException if there are problems reading from the stream - */ - ByteBuf read(int numBytes, int additionalTimeout) throws IOException; + ByteBuf read(int numBytes, OperationContext operationContext) throws IOException; /** * Write each buffer in the list to the stream in order, asynchronously. This method should return immediately, and invoke the given @@ -77,18 +70,20 @@ public interface Stream extends BufferProvider { * * @param buffers the buffers to write. The operation must not {@linkplain ByteBuf#release() release} any buffer from {@code buffers}, * unless the operation {@linkplain ByteBuf#retain() retains} it, and releasing is meant to compensate for that. + * @param operationContext the operation context * @param handler invoked when the write operation has completed */ - void writeAsync(List buffers, AsyncCompletionHandler handler); + void writeAsync(List buffers, OperationContext operationContext, AsyncCompletionHandler handler); /** * Read from the stream, asynchronously. This method should return immediately, and invoke the given callback when the number of * requested bytes have been read. 
* * @param numBytes the number of bytes + * @param operationContext the operation context * @param handler invoked when the read operation has completed */ - void readAsync(int numBytes, AsyncCompletionHandler handler); + void readAsync(int numBytes, OperationContext operationContext, AsyncCompletionHandler handler); /** * The address that this stream is connected to. diff --git a/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java b/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java index 8a822d03f6a..436fccb0996 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java +++ b/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java @@ -180,7 +180,7 @@ private static class TlsChannelStream extends AsynchronousChannelStream { } @Override - public void openAsync(final AsyncCompletionHandler handler) { + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { isTrue("unopened", getChannel() == null); try { SocketChannel socketChannel = SocketChannel.open(); diff --git a/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java index e80909a2c79..de74b6c8d0f 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java +++ b/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java @@ -39,7 +39,7 @@ public UnixSocketChannelStream(final UnixServerAddress address, final SocketSett } @Override - protected Socket initializeSocket() throws IOException { + protected Socket initializeSocket(final OperationContext operationContext) throws IOException { return UnixSocketChannel.open(new UnixSocketAddress(address.getHost())).socket(); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java b/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java index f0ae4a9244e..d0ec8a6ea51 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java @@ -16,14 +16,12 @@ package com.mongodb.internal.connection; -import com.mongodb.RequestContext; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerDescription; import com.mongodb.event.ConnectionCreatedEvent; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; -import com.mongodb.internal.session.SessionContext; import org.bson.ByteBuf; import org.bson.codecs.Decoder; @@ -51,8 +49,8 @@ class UsageTrackingInternalConnection implements InternalConnection { } @Override - public void open() { - wrapped.open(); + public void open(final OperationContext operationContext) { + wrapped.open(operationContext); openedAt = System.currentTimeMillis(); lastUsedAt = openedAt; if (getDescription().getServiceId() != null) { @@ -61,8 +59,8 @@ public void open() { } @Override - public void openAsync(final SingleResultCallback callback) { - wrapped.openAsync((result, t) -> { + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { + wrapped.openAsync(operationContext, (result, t) -> { if (t != null) { 
callback.onResult(null, t); } else { @@ -103,35 +101,27 @@ public ByteBuf getBuffer(final int size) { } @Override - public void sendMessage(final List byteBuffers, final int lastRequestId) { - wrapped.sendMessage(byteBuffers, lastRequestId); + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { + wrapped.sendMessage(byteBuffers, lastRequestId, operationContext); lastUsedAt = System.currentTimeMillis(); } @Override - public T sendAndReceive(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext) { - T result = wrapped.sendAndReceive(message, decoder, sessionContext, requestContext, operationContext); + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { + T result = wrapped.sendAndReceive(message, decoder, operationContext); lastUsedAt = System.currentTimeMillis(); return result; } @Override - public void send(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext) { - wrapped.send(message, decoder, sessionContext); + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { + wrapped.send(message, decoder, operationContext); lastUsedAt = System.currentTimeMillis(); } @Override - public T receive(final Decoder decoder, final SessionContext sessionContext) { - T result = wrapped.receive(decoder, sessionContext); - lastUsedAt = System.currentTimeMillis(); - return result; - } - - @Override - public T receive(final Decoder decoder, final SessionContext sessionContext, final int additionalTimeout) { - T result = wrapped.receive(decoder, sessionContext, additionalTimeout); + public T receive(final Decoder decoder, final OperationContext operationContext) { + T result = wrapped.receive(decoder, operationContext); lastUsedAt = System.currentTimeMillis(); return result; } @@ -142,39 +132,40 @@ public boolean hasMoreToCome() { } @Override - public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, - final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext, + public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, final OperationContext operationContext, final SingleResultCallback callback) { SingleResultCallback errHandlingCallback = errorHandlingCallback((result, t) -> { lastUsedAt = System.currentTimeMillis(); callback.onResult(result, t); }, LOGGER); - wrapped.sendAndReceiveAsync(message, decoder, sessionContext, requestContext, operationContext, errHandlingCallback); + wrapped.sendAndReceiveAsync(message, decoder, operationContext, errHandlingCallback); } @Override - public ResponseBuffers receiveMessage(final int responseTo) { - ResponseBuffers responseBuffers = wrapped.receiveMessage(responseTo); + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { + ResponseBuffers responseBuffers = wrapped.receiveMessage(responseTo, operationContext); lastUsedAt = System.currentTimeMillis(); return responseBuffers; } @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final SingleResultCallback callback) { + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback callback) { 
SingleResultCallback errHandlingCallback = errorHandlingCallback((result, t) -> { lastUsedAt = System.currentTimeMillis(); callback.onResult(result, t); }, LOGGER); - wrapped.sendMessageAsync(byteBuffers, lastRequestId, errHandlingCallback); + wrapped.sendMessageAsync(byteBuffers, lastRequestId, operationContext, errHandlingCallback); } @Override - public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { SingleResultCallback errHandlingCallback = errorHandlingCallback((result, t) -> { lastUsedAt = System.currentTimeMillis(); callback.onResult(result, t); }, LOGGER); - wrapped.receiveMessageAsync(responseTo, errHandlingCallback); + wrapped.receiveMessageAsync(responseTo, operationContext, errHandlingCallback); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java b/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java index 257ad8969d7..b5e2dd0512d 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java +++ b/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java @@ -44,13 +44,14 @@ class X509Authenticator extends Authenticator implements SpeculativeAuthenticato } @Override - void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) { + void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { if (this.speculativeAuthenticateResponse != null) { return; } try { BsonDocument authCommand = getAuthCommand(getMongoCredential().getUserName()); - executeCommand(getMongoCredential().getSource(), authCommand, getClusterConnectionMode(), getServerApi(), connection); + executeCommand(getMongoCredential().getSource(), authCommand, getClusterConnectionMode(), getServerApi(), connection, operationContext); } catch (MongoCommandException e) { throw new MongoSecurityException(getMongoCredential(), "Exception authenticating", e); } @@ -58,14 +59,14 @@ void authenticate(final InternalConnection connection, final ConnectionDescripti @Override void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { if (speculativeAuthenticateResponse != null) { callback.onResult(null, null); } else { SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); try { executeCommandAsync(getMongoCredential().getSource(), getAuthCommand(getMongoCredential().getUserName()), - getClusterConnectionMode(), getServerApi(), connection, + getClusterConnectionMode(), getServerApi(), connection, operationContext, (nonceResult, t) -> { if (t != null) { errHandlingCallback.onResult(null, translateThrowable(t)); diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java index 1f3c6ec9a1b..b28054e7d3d 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java +++ b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java @@ -27,6 +27,7 @@ import com.mongodb.connection.AsyncCompletionHandler; import com.mongodb.connection.SocketSettings; import 
com.mongodb.connection.SslSettings; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.Stream; import com.mongodb.lang.Nullable; import com.mongodb.spi.dns.InetAddressResolver; @@ -48,6 +49,7 @@ import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslHandler; import io.netty.handler.timeout.ReadTimeoutException; +import io.netty.handler.timeout.WriteTimeoutHandler; import org.bson.ByteBuf; import javax.net.ssl.SSLContext; @@ -59,6 +61,7 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Optional; import java.util.Queue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; @@ -67,7 +70,6 @@ import java.util.concurrent.locks.ReentrantLock; import static com.mongodb.assertions.Assertions.assertNotNull; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.internal.Locks.withLock; import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses; import static com.mongodb.internal.connection.SslHelper.enableHostNameVerification; @@ -80,7 +82,8 @@ * A Stream implementation based on Netty 4.0. * Just like it is for the {@link java.nio.channels.AsynchronousSocketChannel}, * concurrent pending1 readers - * (whether {@linkplain #read(int, int) synchronous} or {@linkplain #readAsync(int, AsyncCompletionHandler) asynchronous}) + * (whether {@linkplain #read(int, OperationContext) synchronous} or + * {@linkplain #readAsync(int, OperationContext, AsyncCompletionHandler) asynchronous}) * are not supported by {@link NettyStream}. * However, this class does not have a fail-fast mechanism checking for such situations. *
      @@ -105,7 +108,7 @@ * int1 -> inv2 -> ret2 * \--------> ret1 * } - * As shown on the diagram, the method {@link #readAsync(int, AsyncCompletionHandler)} runs concurrently with + * As shown on the diagram, the method {@link #readAsync(int, OperationContext, AsyncCompletionHandler)} runs concurrently with * itself in the example above. However, there are no concurrent pending readers because the second operation * is invoked after the first operation has completed reading despite the method has not returned yet. */ @@ -137,7 +140,6 @@ final class NettyStream implements Stream { * these fields can be plain.*/ @Nullable private ReadTimeoutTask readTimeoutTask; - private long readTimeoutMillis = NO_SCHEDULE_TIME; NettyStream(final ServerAddress address, final InetAddressResolver inetAddressResolver, final SocketSettings settings, final SslSettings sslSettings, final EventLoopGroup workerGroup, @@ -159,15 +161,14 @@ public ByteBuf getBuffer(final int size) { } @Override - public void open() throws IOException { + public void open(final OperationContext operationContext) throws IOException { FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); - openAsync(handler); + openAsync(operationContext, handler); handler.get(); } - @SuppressWarnings("deprecation") @Override - public void openAsync(final AsyncCompletionHandler handler) { + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { Queue socketAddressQueue; try { @@ -177,10 +178,11 @@ public void openAsync(final AsyncCompletionHandler handler) { return; } - initializeChannel(handler, socketAddressQueue); + initializeChannel(operationContext, handler, socketAddressQueue); } - private void initializeChannel(final AsyncCompletionHandler handler, final Queue socketAddressQueue) { + private void initializeChannel(final OperationContext operationContext, final AsyncCompletionHandler handler, + final Queue socketAddressQueue) { if (socketAddressQueue.isEmpty()) { handler.failed(new MongoSocketException("Exception opening socket", getAddress())); } else { @@ -189,8 +191,8 @@ private void initializeChannel(final AsyncCompletionHandler handler, final Bootstrap bootstrap = new Bootstrap(); bootstrap.group(workerGroup); bootstrap.channel(socketChannelClass); - - bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, settings.getConnectTimeout(MILLISECONDS)); + bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, + operationContext.getTimeoutContext().getConnectTimeoutMs()); bootstrap.option(ChannelOption.TCP_NODELAY, true); bootstrap.option(ChannelOption.SO_KEEPALIVE, true); @@ -210,46 +212,36 @@ public void initChannel(final SocketChannel ch) { addSslHandler(ch); } - int readTimeout = settings.getReadTimeout(MILLISECONDS); - if (readTimeout > NO_SCHEDULE_TIME) { - readTimeoutMillis = readTimeout; - /* We need at least one handler before (in the inbound evaluation order) the InboundBufferHandler, - * so that we can fire exception events (they are inbound events) using its context and the InboundBufferHandler - * receives them. 
SslHandler is not always present, so adding a NOOP handler.*/ - pipeline.addLast(new ChannelInboundHandlerAdapter()); - readTimeoutTask = new ReadTimeoutTask(pipeline.lastContext()); - } - - pipeline.addLast(new InboundBufferHandler()); + /* We need at least one handler before (in the inbound evaluation order) the InboundBufferHandler, + * so that we can fire exception events (they are inbound events) using its context and the InboundBufferHandler + * receives them. SslHandler is not always present, so adding a NOOP handler.*/ + pipeline.addLast("ChannelInboundHandlerAdapter", new ChannelInboundHandlerAdapter()); + readTimeoutTask = new ReadTimeoutTask(pipeline.lastContext()); + pipeline.addLast("InboundBufferHandler", new InboundBufferHandler()); } }); ChannelFuture channelFuture = bootstrap.connect(nextAddress); - channelFuture.addListener(new OpenChannelFutureListener(socketAddressQueue, channelFuture, handler)); + channelFuture.addListener(new OpenChannelFutureListener(operationContext, socketAddressQueue, channelFuture, handler)); } } @Override - public void write(final List buffers) throws IOException { + public void write(final List buffers, final OperationContext operationContext) throws IOException { FutureAsyncCompletionHandler future = new FutureAsyncCompletionHandler<>(); - writeAsync(buffers, future); + writeAsync(buffers, operationContext, future); future.get(); } @Override - public ByteBuf read(final int numBytes) throws IOException { - return read(numBytes, 0); - } - - @Override - public ByteBuf read(final int numBytes, final int additionalTimeoutMillis) throws IOException { - isTrueArgument("additionalTimeoutMillis must not be negative", additionalTimeoutMillis >= 0); + public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException { FutureAsyncCompletionHandler future = new FutureAsyncCompletionHandler<>(); - readAsync(numBytes, future, combinedTimeout(readTimeoutMillis, additionalTimeoutMillis)); + readAsync(numBytes, future, operationContext.getTimeoutContext().getReadTimeoutMS()); return future.get(); } @Override - public void writeAsync(final List buffers, final AsyncCompletionHandler handler) { + public void writeAsync(final List buffers, final OperationContext operationContext, + final AsyncCompletionHandler handler) { CompositeByteBuf composite = PooledByteBufAllocator.DEFAULT.compositeBuffer(); for (ByteBuf cur : buffers) { // The Netty framework releases `CompositeByteBuf` after writing @@ -260,7 +252,10 @@ public void writeAsync(final List buffers, final AsyncCompletionHandler composite.addComponent(true, ((NettyByteBuf) cur).asByteBuf().retain()); } + long writeTimeoutMS = operationContext.getTimeoutContext().getWriteTimeoutMS(); + final Optional writeTimeoutHandler = addWriteTimeoutHandler(writeTimeoutMS); channel.writeAndFlush(composite).addListener((ChannelFutureListener) future -> { + writeTimeoutHandler.map(w -> channel.pipeline().remove(w)); if (!future.isSuccess()) { handler.failed(future.cause()); } else { @@ -269,9 +264,18 @@ public void writeAsync(final List buffers, final AsyncCompletionHandler }); } + private Optional addWriteTimeoutHandler(final long writeTimeoutMS) { + if (writeTimeoutMS != NO_SCHEDULE_TIME) { + WriteTimeoutHandler writeTimeoutHandler = new WriteTimeoutHandler(writeTimeoutMS, MILLISECONDS); + channel.pipeline().addBefore("ChannelInboundHandlerAdapter", "WriteTimeoutHandler", writeTimeoutHandler); + return Optional.of(writeTimeoutHandler); + } + return Optional.empty(); + } + @Override - public 
void readAsync(final int numBytes, final AsyncCompletionHandler handler) { - readAsync(numBytes, handler, readTimeoutMillis); + public void readAsync(final int numBytes, final OperationContext operationContext, final AsyncCompletionHandler handler) { + readAsync(numBytes, handler, operationContext.getTimeoutContext().getReadTimeoutMS()); } /** @@ -501,9 +505,12 @@ private class OpenChannelFutureListener implements ChannelFutureListener { private final Queue socketAddressQueue; private final ChannelFuture channelFuture; private final AsyncCompletionHandler handler; + private final OperationContext operationContext; - OpenChannelFutureListener(final Queue socketAddressQueue, final ChannelFuture channelFuture, - final AsyncCompletionHandler handler) { + OpenChannelFutureListener(final OperationContext operationContext, + final Queue socketAddressQueue, final ChannelFuture channelFuture, + final AsyncCompletionHandler handler) { + this.operationContext = operationContext; this.socketAddressQueue = socketAddressQueue; this.channelFuture = channelFuture; this.handler = handler; @@ -526,7 +533,7 @@ public void operationComplete(final ChannelFuture future) { } else if (socketAddressQueue.isEmpty()) { handler.failed(new MongoSocketOpenException("Exception opening socket", getAddress(), future.cause())); } else { - initializeChannel(handler, socketAddressQueue); + initializeChannel(operationContext, handler, socketAddressQueue); } } }); @@ -539,14 +546,6 @@ private static void cancel(@Nullable final Future f) { } } - private static long combinedTimeout(final long timeout, final int additionalTimeout) { - if (timeout == NO_SCHEDULE_TIME) { - return NO_SCHEDULE_TIME; - } else { - return Math.addExact(timeout, additionalTimeout); - } - } - @Nullable private static ScheduledFuture scheduleReadTimeout(@Nullable final ReadTimeoutTask readTimeoutTask, final long timeoutMillis) { if (timeoutMillis == NO_SCHEDULE_TIME) { @@ -576,9 +575,9 @@ public void run() { } } + @Nullable private ScheduledFuture schedule(final long timeoutMillis) { - //assert timeoutMillis > 0 : timeoutMillis; - return ctx.executor().schedule(this, timeoutMillis, MILLISECONDS); + return timeoutMillis > 0 ? ctx.executor().schedule(this, timeoutMillis, MILLISECONDS) : null; } } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java index f1c87fabee5..3c845ce6d08 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java @@ -554,6 +554,9 @@ private int handshake(Optional dest, Optional ha try { writeLock.lock(); try { + if (invalid || shutdownSent) { + throw new ClosedChannelException(); + } Util.assertTrue(inPlain.nullOrEmpty()); outEncrypted.prepare(); try { diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java b/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java new file mode 100644 index 00000000000..5c178f8ed33 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
      + */ +@FunctionalInterface +public interface CheckedConsumer { + + /** + * Performs this operation on the given argument. + * + * @param t the input argument + * @throws E the checked exception to throw + */ + void accept(T t) throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java b/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java new file mode 100644 index 00000000000..39b280aa561 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
      + */ +@FunctionalInterface +public interface CheckedFunction { + + /** + * Applies the function to the given argument. + * + * @param t the function argument + * @return the function result + * @throws E the checked exception to throw + */ + R apply(T t) throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java b/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java new file mode 100644 index 00000000000..f5b24c28a72 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
      + */ +@FunctionalInterface +public interface CheckedRunnable { + + /** + * Checked run. + * + * @throws E the checked exception to throw + */ + void run() throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/CheckedSupplier.java b/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java similarity index 95% rename from driver-core/src/main/com/mongodb/internal/CheckedSupplier.java rename to driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java index c75145eb942..ab39e5c824a 100644 --- a/driver-core/src/main/com/mongodb/internal/CheckedSupplier.java +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.mongodb.internal; +package com.mongodb.internal.function; /** *
<p>This class is not part of the public API and may be removed or changed at any time</p>
      diff --git a/driver-core/src/main/com/mongodb/internal/function/package-info.java b/driver-core/src/main/com/mongodb/internal/function/package-info.java new file mode 100644 index 00000000000..baea9b145ec --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + */ + +@NonNullApi +package com.mongodb.internal.function; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java index 13166eb53ab..bbd7ce7300e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java @@ -18,10 +18,12 @@ import com.mongodb.Function; import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; /** * An operation that aborts a transaction. 
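Note: the com.mongodb.internal.function package introduced just above supplies checked-exception counterparts of the standard functional interfaces. A minimal usage sketch follows; the <T, E extends Exception> type parameters are assumed from the @throws documentation in the patch, and the example class itself is illustrative, not part of the driver:

    import java.io.IOException;

    import com.mongodb.internal.function.CheckedRunnable;
    import com.mongodb.internal.function.CheckedSupplier;

    // Illustrative only: lambdas that throw a checked exception without wrapping it.
    final class CheckedInterfacesSketch {
        public static void main(final String[] args) throws IOException {
            CheckedSupplier<String, IOException> readName = () -> "mongodb";              // T get() throws E
            CheckedRunnable<IOException> task = () -> System.out.println(readName.get()); // void run() throws E
            task.run();
        }
    }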
@@ -47,15 +49,17 @@ protected String getCommandName() { @Override CommandCreator getCommandCreator() { - CommandCreator creator = super.getCommandCreator(); - if (recoveryToken != null) { - return (serverDescription, connectionDescription) -> creator.create(serverDescription, connectionDescription).append("recoveryToken", recoveryToken); - } - return creator; + return (operationContext, serverDescription, connectionDescription) -> { + operationContext.getTimeoutContext().resetToDefaultMaxTime(); + BsonDocument command = AbortTransactionOperation.super.getCommandCreator() + .create(operationContext, serverDescription, connectionDescription); + putIfNotNull(command, "recoveryToken", recoveryToken); + return command; + }; } @Override - protected Function getRetryCommandModifier() { + protected Function getRetryCommandModifier(final TimeoutContext timeoutContext) { return cmd -> cmd; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java index 82da3fc7646..8410a030185 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java @@ -25,12 +25,12 @@ import com.mongodb.lang.Nullable; import org.bson.BsonDocument; -import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; -import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; -import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; /** * An abstract class for defining operations for managing Atlas Search indexes. 
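Note: the getCommandCreator override above, and the operation classes that follow, build their command documents through a CommandCreator that now also receives the OperationContext, which is how command construction reaches the per-operation TimeoutContext. Below is a minimal sketch of the shape implied by those lambdas; in the driver the interface is nested in CommandOperationHelper, and the parameter names here are assumed from the surrounding hunks:

    import com.mongodb.connection.ConnectionDescription;
    import com.mongodb.connection.ServerDescription;
    import com.mongodb.internal.connection.OperationContext;
    import org.bson.BsonDocument;

    // Sketch only: the three-argument shape used by the command-creator lambdas in this patch.
    @FunctionalInterface
    interface CommandCreator {
        BsonDocument create(OperationContext operationContext,
                            ServerDescription serverDescription,
                            ConnectionDescription connectionDescription);
    }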
@@ -40,15 +40,17 @@ abstract class AbstractWriteSearchIndexOperation implements AsyncWriteOperation, WriteOperation { private final MongoNamespace namespace; - AbstractWriteSearchIndexOperation(final MongoNamespace mongoNamespace) { - this.namespace = mongoNamespace; + AbstractWriteSearchIndexOperation(final MongoNamespace namespace) { + this.namespace = namespace; } @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { try { - executeCommand(binding, namespace.getDatabaseName(), buildCommand(), connection, writeConcernErrorTransformer()); + executeCommand(binding, namespace.getDatabaseName(), buildCommand(), + connection, + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())); } catch (MongoCommandException mongoCommandException) { swallowOrThrow(mongoCommandException); } @@ -61,7 +63,7 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall withAsyncSourceAndConnection(binding::getWriteConnectionSource, false, callback, (connectionSource, connection, cb) -> executeCommandAsync(binding, namespace.getDatabaseName(), buildCommand(), connection, - writeConcernErrorTransformerAsync(), (result, commandExecutionError) -> { + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), (result, commandExecutionError) -> { try { swallowOrThrow(commandExecutionError); callback.onResult(result, null); diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java index 857c14b857c..07943560b40 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java @@ -18,20 +18,19 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.client.model.AggregationLevel; -import com.mongodb.internal.connection.NoOpSessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonValue; import org.bson.codecs.Decoder; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; @@ -49,7 +48,7 @@ public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder, - final AggregationLevel aggregationLevel) { + final AggregationLevel aggregationLevel) { this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, decoder, aggregationLevel); } @@ -75,24 +74,6 @@ public AggregateOperation batchSize(@Nullable final Integer batchSize) { return this; } - public long getMaxAwaitTime(final TimeUnit timeUnit) { - return wrapped.getMaxAwaitTime(timeUnit); - } - - public AggregateOperation maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - wrapped.maxAwaitTime(maxAwaitTime, timeUnit); - return this; - } - - public long getMaxTime(final TimeUnit timeUnit) { - return wrapped.getMaxTime(timeUnit); - } - - public AggregateOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - 
wrapped.maxTime(maxTime, timeUnit); - return this; - } - public Collation getCollation() { return wrapped.getCollation(); } @@ -148,6 +129,11 @@ public AggregateOperation hint(@Nullable final BsonValue hint) { return this; } + public AggregateOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + @Override public BatchCursor execute(final ReadBinding binding) { return wrapped.execute(binding); @@ -159,24 +145,22 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb } public ReadOperation asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(wrapped.getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), - resultDecoder); + return createExplainableOperation(verbosity, resultDecoder); } public AsyncReadOperation asAsyncExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(wrapped.getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), - resultDecoder); + return createExplainableOperation(verbosity, resultDecoder); } + CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return new CommandReadOperation<>(getNamespace().getDatabaseName(), + (operationContext, serverDescription, connectionDescription) -> + asExplainCommand(wrapped.getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder); + } MongoNamespace getNamespace() { return wrapped.getNamespace(); } - Decoder getDecoder() { - return wrapped.getDecoder(); - } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java index ff6b55bac48..7ba2c56b874 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java @@ -16,27 +16,29 @@ package com.mongodb.internal.operation; +import com.mongodb.CursorType; import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.client.model.AggregationLevel; -import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.BsonValue; import org.bson.codecs.Decoder; import java.util.Arrays; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; @@ -45,6 +47,7 @@ import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; 
import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; @@ -54,7 +57,6 @@ class AggregateOperationImpl implements AsyncReadOperation FIELD_NAMES_WITH_RESULT = Arrays.asList(RESULT, FIRST_BATCH); - private final MongoNamespace namespace; private final List pipeline; private final Decoder decoder; @@ -67,18 +69,21 @@ class AggregateOperationImpl implements AsyncReadOperation pipeline, final Decoder decoder, - final AggregationLevel aggregationLevel) { - this(namespace, pipeline, decoder, defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel), - notNull("namespace", namespace).getCollectionName()), defaultPipelineCreator(pipeline)); + AggregateOperationImpl(final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregationLevel aggregationLevel) { + this(namespace, pipeline, decoder, + defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel), + notNull("namespace", namespace).getCollectionName()), + defaultPipelineCreator(pipeline)); } - AggregateOperationImpl(final MongoNamespace namespace, final List pipeline, final Decoder decoder, - final AggregateTarget aggregateTarget, final PipelineCreator pipelineCreator) { + AggregateOperationImpl(final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregateTarget aggregateTarget, + final PipelineCreator pipelineCreator) { this.namespace = notNull("namespace", namespace); this.pipeline = notNull("pipeline", pipeline); this.decoder = notNull("decoder", decoder); @@ -116,30 +121,6 @@ AggregateOperationImpl batchSize(@Nullable final Integer batchSize) { return this; } - long getMaxAwaitTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS); - } - - AggregateOperationImpl maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxAwaitTime >= 0", maxAwaitTime >= 0); - this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); - return this; - } - - long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - AggregateOperationImpl maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - Collation getCollation() { return collation; } @@ -169,6 +150,19 @@ AggregateOperationImpl retryReads(final boolean retryReads) { return this; } + /** + * When {@link TimeoutContext#hasTimeoutMS()} then {@link TimeoutSettings#getMaxAwaitTimeMS()} usage in {@code getMore} commands + * depends on the type of cursor. For {@link CursorType#TailableAwait} it is used, for others it is not. + * {@link CursorType#TailableAwait} is used mainly for change streams in {@link AggregateOperationImpl}. 
+ * + * @param cursorType + * @return this + */ + AggregateOperationImpl cursorType(final CursorType cursorType) { + this.cursorType = cursorType; + return this; + } + boolean getRetryReads() { return retryReads; } @@ -178,6 +172,13 @@ BsonValue getHint() { return hint; } + public AggregateOperationImpl timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + AggregateOperationImpl hint(@Nullable final BsonValue hint) { isTrueArgument("BsonString or BsonDocument", hint == null || hint.isDocument() || hint.isString()); this.hint = hint; @@ -186,31 +187,30 @@ AggregateOperationImpl hint(@Nullable final BsonValue hint) { @Override public BatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), transformer(), retryReads); + return executeRetryableRead(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), + transformer(), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(this.decoder, FIELD_NAMES_WITH_RESULT), asyncTransformer(), retryReads, - errHandlingCallback); + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), + asyncTransformer(), retryReads, + errHandlingCallback); } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion()); + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> + getCommand(operationContext, connectionDescription.getMaxWireVersion()); } - BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) { + BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) { BsonDocument commandDocument = new BsonDocument("aggregate", aggregateTarget.create()); - - appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument); + appendReadConcernToCommand(operationContext.getSessionContext(), maxWireVersion, commandDocument); commandDocument.put("pipeline", pipelineCreator.create()); - if (maxTimeMS > 0) { - commandDocument.put("maxTimeMS", maxTimeMS > Integer.MAX_VALUE - ? new BsonInt64(maxTimeMS) : new BsonInt32((int) maxTimeMS)); - } + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); BsonDocument cursor = new BsonDocument(); if (batchSize != null) { cursor.put("batchSize", new BsonInt32(batchSize)); @@ -237,14 +237,30 @@ BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVe private CommandReadTransformer> transformer() { return (result, source, connection) -> - new CommandBatchCursor<>(result, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, - comment, source, connection); + new CommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? 
batchSize : 0, + getMaxTimeForCursor(source.getOperationContext().getTimeoutContext()), decoder, comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { return (result, source, connection) -> - new AsyncCommandBatchCursor<>(result, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, - comment, source, connection); + new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0, + getMaxTimeForCursor(source.getOperationContext().getTimeoutContext()), decoder, comment, source, connection); + } + + private TimeoutMode getTimeoutMode() { + TimeoutMode localTimeoutMode = timeoutMode; + if (localTimeoutMode == null) { + localTimeoutMode = TimeoutMode.CURSOR_LIFETIME; + } + return localTimeoutMode; + } + + private long getMaxTimeForCursor(final TimeoutContext timeoutContext) { + long maxAwaitTimeMS = timeoutContext.getMaxAwaitTimeMS(); + if (timeoutContext.hasTimeoutMS()){ + return CursorType.TailableAwait == cursorType ? maxAwaitTimeMS : 0; + } + return maxAwaitTimeMS; } interface AggregateTarget { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java index f41d0e4a462..904f85042ac 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java @@ -20,6 +20,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -30,13 +31,11 @@ import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.BsonValue; import org.bson.codecs.BsonDocumentCodec; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; @@ -63,35 +62,19 @@ public class AggregateToCollectionOperation implements AsyncReadOperation, private final AggregationLevel aggregationLevel; private Boolean allowDiskUse; - private long maxTimeMS; private Boolean bypassDocumentValidation; private Collation collation; private BsonValue comment; private BsonValue hint; private BsonDocument variables; - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline) { - this(namespace, pipeline, null, null, AggregationLevel.COLLECTION); - } - - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - final WriteConcern writeConcern) { - this(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION); - } - - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - final ReadConcern readConcern) { - this(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION); - } - - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - final ReadConcern readConcern, final WriteConcern writeConcern) { + public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, final ReadConcern readConcern, + final WriteConcern writeConcern) { this(namespace, pipeline, readConcern, writeConcern, 
AggregationLevel.COLLECTION); } public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, - final AggregationLevel aggregationLevel) { + @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, final AggregationLevel aggregationLevel) { this.namespace = notNull("namespace", namespace); this.pipeline = notNull("pipeline", pipeline); this.writeConcern = writeConcern; @@ -122,17 +105,6 @@ public AggregateToCollectionOperation allowDiskUse(@Nullable final Boolean allow return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public AggregateToCollectionOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public Boolean getBypassDocumentValidation() { return bypassDocumentValidation; } @@ -174,15 +146,20 @@ public AggregateToCollectionOperation hint(@Nullable final BsonValue hint) { return this; } + public AggregateToCollectionOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + isTrueArgument("timeoutMode cannot be ITERATION.", timeoutMode == null || timeoutMode.equals(TimeoutMode.CURSOR_LIFETIME)); + return this; + } + @Override public Void execute(final ReadBinding binding) { return executeRetryableRead(binding, - () -> binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()), - namespace.getDatabaseName(), - (serverDescription, connectionDescription) -> getCommand(), - new BsonDocumentCodec(), (result, source, connection) -> { + () -> binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()), + namespace.getDatabaseName(), + getCommandCreator(), + new BsonDocumentCodec(), (result, source, connection) -> { throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), - connection.getDescription().getMaxWireVersion()); + connection.getDescription().getMaxWireVersion(), binding.getOperationContext().getTimeoutContext()); return null; }, false); } @@ -190,53 +167,51 @@ public Void execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { executeRetryableReadAsync(binding, - (connectionSourceCallback) -> { - binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback); - }, - namespace.getDatabaseName(), - (serverDescription, connectionDescription) -> getCommand(), - new BsonDocumentCodec(), (result, source, connection) -> { + (connectionSourceCallback) -> + binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback), + namespace.getDatabaseName(), + getCommandCreator(), + new BsonDocumentCodec(), (result, source, connection) -> { throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), - connection.getDescription().getMaxWireVersion()); + connection.getDescription().getMaxWireVersion(), binding.getOperationContext().getTimeoutContext()); return null; }, false, callback); } - private BsonDocument getCommand() { - BsonValue aggregationTarget = (aggregationLevel == AggregationLevel.DATABASE) - ? 
new BsonInt32(1) : new BsonString(namespace.getCollectionName()); - - BsonDocument commandDocument = new BsonDocument("aggregate", aggregationTarget); - commandDocument.put("pipeline", new BsonArray(pipeline)); - if (maxTimeMS > 0) { - commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - if (allowDiskUse != null) { - commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse)); - } - if (bypassDocumentValidation != null) { - commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); - } - - commandDocument.put("cursor", new BsonDocument()); - - appendWriteConcernToCommand(writeConcern, commandDocument); - if (readConcern != null && !readConcern.isServerDefault()) { - commandDocument.put("readConcern", readConcern.asDocument()); - } - - if (collation != null) { - commandDocument.put("collation", collation.asDocument()); - } - if (comment != null) { - commandDocument.put("comment", comment); - } - if (hint != null) { - commandDocument.put("hint", hint); - } - if (variables != null) { - commandDocument.put("let", variables); - } - return commandDocument; + private CommandOperationHelper.CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonValue aggregationTarget = (aggregationLevel == AggregationLevel.DATABASE) + ? new BsonInt32(1) : new BsonString(namespace.getCollectionName()); + + BsonDocument commandDocument = new BsonDocument("aggregate", aggregationTarget); + commandDocument.put("pipeline", new BsonArray(pipeline)); + if (allowDiskUse != null) { + commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse)); + } + if (bypassDocumentValidation != null) { + commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); + } + + commandDocument.put("cursor", new BsonDocument()); + + appendWriteConcernToCommand(writeConcern, commandDocument); + if (readConcern != null && !readConcern.isServerDefault()) { + commandDocument.put("readConcern", readConcern.asDocument()); + } + + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + if (comment != null) { + commandDocument.put("comment", comment); + } + if (hint != null) { + commandDocument.put("hint", hint); + } + if (variables != null) { + commandDocument.put("let", variables); + } + return commandDocument; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java index 7e55f05cac5..a4cfbafedb6 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java @@ -17,6 +17,7 @@ package com.mongodb.internal.operation; import com.mongodb.MongoException; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -42,6 +43,7 @@ final class AsyncChangeStreamBatchCursor implements AsyncAggregateResponseBatchCursor { private final AsyncReadBinding binding; + private final TimeoutContext timeoutContext; private final ChangeStreamOperation changeStreamOperation; private final int maxWireVersion; @@ -63,6 +65,7 @@ final class AsyncChangeStreamBatchCursor implements AsyncAggregateResponseBat this.wrapped = new 
AtomicReference<>(assertNotNull(wrapped)); this.binding = binding; binding.retain(); + this.timeoutContext = binding.getOperationContext().getTimeoutContext(); this.resumeToken = resumeToken; this.maxWireVersion = maxWireVersion; isClosed = new AtomicBoolean(); @@ -80,6 +83,7 @@ public void next(final SingleResultCallback> callback) { @Override public void close() { + timeoutContext.resetTimeoutIfPresent(); if (isClosed.compareAndSet(false, true)) { try { nullifyAndCloseWrapped(); @@ -177,6 +181,7 @@ private interface AsyncBlock { } private void resumeableOperation(final AsyncBlock asyncBlock, final SingleResultCallback> callback, final boolean tryNext) { + timeoutContext.resetTimeoutIfPresent(); SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); if (isClosed()) { errHandlingCallback.onResult(null, new MongoException(format("%s called after the cursor was closed.", @@ -219,12 +224,12 @@ private void retryOperation(final AsyncBlock asyncBlock, final SingleResultCallb changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, assertNotNull(source).getServerDescription().getMaxWireVersion()); source.release(); - changeStreamOperation.executeAsync(binding, (result, t1) -> { + changeStreamOperation.executeAsync(binding, (asyncBatchCursor, t1) -> { if (t1 != null) { callback.onResult(null, t1); } else { try { - setWrappedOrCloseIt(assertNotNull((AsyncChangeStreamBatchCursor) result).getWrapped()); + setWrappedOrCloseIt(assertNotNull((AsyncChangeStreamBatchCursor) asyncBatchCursor).getWrapped()); } finally { try { binding.release(); // release the new change stream batch cursor's reference to the binding diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java index 4831650f7ff..eec8721fbf1 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java @@ -18,13 +18,16 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSocketException; import com.mongodb.ReadPreference; import com.mongodb.ServerAddress; import com.mongodb.ServerCursor; import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerType; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -32,6 +35,7 @@ import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.AsyncOperationHelper.AsyncCallableConnectionWithCallback; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -71,6 +75,7 @@ class AsyncCommandBatchCursor implements AsyncAggregateResponseBatchCursor private volatile CommandCursorResult commandCursorResult; AsyncCommandBatchCursor( + final TimeoutMode timeoutMode, final BsonDocument commandCursorDocument, final int batchSize, final long maxTimeMS, final Decoder decoder, @@ -87,14 +92,18 @@ class AsyncCommandBatchCursor implements 
AsyncAggregateResponseBatchCursor this.maxWireVersion = connectionDescription.getMaxWireVersion(); this.firstBatchEmpty = commandCursorResult.getResults().isEmpty(); + connectionSource.getOperationContext().getTimeoutContext().setMaxTimeOverride(maxTimeMS); + AsyncConnection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER ? connection : null; - resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor()); + resourceManager = new ResourceManager(timeoutMode, namespace, connectionSource, connectionToPin, + commandCursorResult.getServerCursor()); } @Override public void next(final SingleResultCallback> callback) { resourceManager.execute(funcCallback -> { + resourceManager.checkTimeoutModeAndResetTimeoutContextIfIteration(); ServerCursor localServerCursor = resourceManager.getServerCursor(); boolean serverCursorIsNull = localServerCursor == null; List batchResults = emptyList(); @@ -167,10 +176,10 @@ private void getMore(final ServerCursor cursor, final SingleResultCallback> callback) { connection.commandAsync(namespace.getDatabaseName(), - getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, maxTimeMS, comment), + getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment), NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), CommandResultDocumentCodec.create(decoder, NEXT_BATCH), - assertNotNull(resourceManager.getConnectionSource()), + assertNotNull(resourceManager.getConnectionSource()).getOperationContext(), (commandResult, t) -> { if (t != null) { Throwable translatedException = @@ -207,15 +216,21 @@ private CommandCursorResult toCommandCursorResult(final ServerAddress serverA return commandCursorResult; } + void setCloseWithoutTimeoutReset(final boolean closeWithoutTimeoutReset) { + this.resourceManager.setCloseWithoutTimeoutReset(closeWithoutTimeoutReset); + } + @ThreadSafe private static final class ResourceManager extends CursorResourceManager { ResourceManager( + final TimeoutMode timeoutMode, final MongoNamespace namespace, final AsyncConnectionSource connectionSource, @Nullable final AsyncConnection connectionToPin, @Nullable final ServerCursor serverCursor) { - super(namespace, connectionSource, connectionToPin, serverCursor); + super(connectionSource.getOperationContext().getTimeoutContext(), timeoutMode, namespace, connectionSource, connectionToPin, + serverCursor); } /** @@ -250,6 +265,7 @@ void doClose() { unsetServerCursor(); } + resetTimeout(); if (getServerCursor() != null) { getConnection((connection, t) -> { if (connection != null) { @@ -271,8 +287,8 @@ void executeWithConnection(final AsyncCallableConnectionWithCallback call return; } callable.call(assertNotNull(connection), (result, t1) -> { - if (t1 instanceof MongoSocketException) { - onCorruptedConnection(connection, (MongoSocketException) t1); + if (t1 != null) { + handleException(connection, t1); } connection.release(); callback.onResult(result, t1); @@ -280,6 +296,14 @@ void executeWithConnection(final AsyncCallableConnectionWithCallback call }); } + private void handleException(final AsyncConnection connection, final Throwable exception) { + if (exception instanceof MongoOperationTimeoutException && exception.getCause() instanceof MongoSocketException) { + onCorruptedConnection(connection, (MongoSocketException) exception.getCause()); + } else if (exception instanceof MongoSocketException) { + 
onCorruptedConnection(connection, (MongoSocketException) exception); + } + } + private void getConnection(final SingleResultCallback callback) { assertTrue(getState() != State.IDLE); AsyncConnection pinnedConnection = getPinnedConnection(); @@ -305,9 +329,13 @@ private void releaseServerAndClientResources(final AsyncConnection connection) { private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor, final AsyncConnection localConnection, final SingleResultCallback callback) { + OperationContext operationContext = assertNotNull(getConnectionSource()).getOperationContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + timeoutContext.resetToDefaultMaxTime(); + localConnection.commandAsync(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), - assertNotNull(getConnectionSource()), (r, t) -> callback.onResult(null, null)); + operationContext, (r, t) -> callback.onResult(null, null)); } } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java index b56f624bef5..35782219545 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -20,6 +20,8 @@ import com.mongodb.MongoException; import com.mongodb.ReadPreference; import com.mongodb.assertions.Assertions; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackBiFunction; @@ -132,8 +134,12 @@ static void withAsyncSuppliedResource(final Asyn errorHandlingCallback.onResult(null, supplierException); } else { Assertions.assertNotNull(resource); - AsyncCallbackSupplier curriedFunction = c -> function.apply(resource, c); - curriedFunction.whenComplete(resource::release).get(errorHandlingCallback); + try { + AsyncCallbackSupplier curriedFunction = c -> function.apply(resource, c); + curriedFunction.whenComplete(resource::release).get(errorHandlingCallback); + } catch (Exception e) { + errorHandlingCallback.onResult(null, e); + } } }); } @@ -162,8 +168,8 @@ static void executeRetryableReadAsync( final CommandReadTransformerAsync transformer, final boolean retryReads, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, binding::getReadConnectionSource, database, commandCreator, decoder, transformer, retryReads, - callback); + executeRetryableReadAsync(binding, binding::getReadConnectionSource, database, commandCreator, + decoder, transformer, retryReads, callback); } static void executeRetryableReadAsync( @@ -175,28 +181,41 @@ static void executeRetryableReadAsync( final CommandReadTransformerAsync transformer, final boolean retryReads, final SingleResultCallback callback) { - RetryState retryState = initialRetryState(retryReads); + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); binding.retain(); + OperationContext operationContext = binding.getOperationContext(); AsyncCallbackSupplier asyncRead = decorateReadWithRetriesAsync(retryState, binding.getOperationContext(), (AsyncCallbackSupplier) funcCallback -> withAsyncSourceAndConnection(sourceAsyncSupplier, false, 
funcCallback, (source, connection, releasingCallback) -> { if (retryState.breakAndCompleteIfRetryAnd( () -> !OperationHelper.canRetryRead(source.getServerDescription(), - binding.getSessionContext()), + operationContext), releasingCallback)) { return; } - createReadCommandAndExecuteAsync(retryState, binding, source, - database, commandCreator, - decoder, transformer, - connection, - releasingCallback); + createReadCommandAndExecuteAsync(retryState, operationContext, source, database, + commandCreator, decoder, transformer, connection, releasingCallback); }) ).whenComplete(binding::release); asyncRead.get(errorHandlingCallback(callback, OperationHelper.LOGGER)); } + static void executeCommandAsync( + final AsyncWriteBinding binding, + final String database, + final CommandCreator commandCreator, + final CommandWriteTransformerAsync transformer, + final SingleResultCallback callback) { + Assertions.notNull("binding", binding); + withAsyncSourceAndConnection(binding::getWriteConnectionSource, false, callback, + (source, connection, releasingCallback) -> + executeCommandAsync(binding, database, commandCreator.create( + binding.getOperationContext(), source.getServerDescription(), connection.getDescription()), + connection, transformer, releasingCallback) + ); + } + static void executeCommandAsync(final AsyncWriteBinding binding, final String database, final BsonDocument command, @@ -207,7 +226,7 @@ static void executeCommandAsync(final AsyncWriteBinding binding, SingleResultCallback addingRetryableLabelCallback = addingRetryableLabelCallback(callback, connection.getDescription().getMaxWireVersion()); connection.commandAsync(database, command, new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), - binding, transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); + binding.getOperationContext(), transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); } static void executeRetryableWriteAsync( @@ -220,14 +239,16 @@ static void executeRetryableWriteAsync( final CommandWriteTransformerAsync transformer, final Function retryCommandModifier, final SingleResultCallback callback) { - RetryState retryState = initialRetryState(true); + + RetryState retryState = initialRetryState(true, binding.getOperationContext().getTimeoutContext()); binding.retain(); + OperationContext operationContext = binding.getOperationContext(); - AsyncCallbackSupplier asyncWrite = decorateWriteWithRetriesAsync(retryState, binding.getOperationContext(), + AsyncCallbackSupplier asyncWrite = decorateWriteWithRetriesAsync(retryState, operationContext, (AsyncCallbackSupplier) funcCallback -> { boolean firstAttempt = retryState.isFirstAttempt(); - if (!firstAttempt && binding.getSessionContext().hasActiveTransaction()) { - binding.getSessionContext().clearTransactionContext(); + if (!firstAttempt && operationContext.getSessionContext().hasActiveTransaction()) { + operationContext.getSessionContext().clearTransactionContext(); } withAsyncSourceAndConnection(binding::getWriteConnectionSource, true, funcCallback, (source, connection, releasingCallback) -> { @@ -235,7 +256,8 @@ static void executeRetryableWriteAsync( SingleResultCallback addingRetryableLabelCallback = firstAttempt ? 
releasingCallback : addingRetryableLabelCallback(releasingCallback, maxWireVersion); - if (retryState.breakAndCompleteIfRetryAnd(() -> !OperationHelper.canRetryWrite(connection.getDescription(), binding.getSessionContext()), + if (retryState.breakAndCompleteIfRetryAnd(() -> + !OperationHelper.canRetryWrite(connection.getDescription(), operationContext.getSessionContext()), addingRetryableLabelCallback)) { return; } @@ -245,7 +267,10 @@ static void executeRetryableWriteAsync( .map(previousAttemptCommand -> { Assertions.assertFalse(firstAttempt); return retryCommandModifier.apply(previousAttemptCommand); - }).orElseGet(() -> commandCreator.create(source.getServerDescription(), connection.getDescription())); + }).orElseGet(() -> commandCreator.create( + operationContext, + source.getServerDescription(), + connection.getDescription())); // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true) .attach(AttachmentKeys.retryableCommandFlag(), isRetryWritesEnabled(command), true) @@ -255,8 +280,8 @@ static void executeRetryableWriteAsync( addingRetryableLabelCallback.onResult(null, t); return; } - connection.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, binding, - transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); + connection.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, + operationContext, transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); }); }).whenComplete(binding::release); @@ -265,7 +290,7 @@ static void executeRetryableWriteAsync( static void createReadCommandAndExecuteAsync( final RetryState retryState, - final AsyncReadBinding binding, + final OperationContext operationContext, final AsyncConnectionSource source, final String database, final CommandCreator commandCreator, @@ -275,14 +300,14 @@ static void createReadCommandAndExecuteAsync( final SingleResultCallback callback) { BsonDocument command; try { - command = commandCreator.create(source.getServerDescription(), connection.getDescription()); + command = commandCreator.create(operationContext, source.getServerDescription(), connection.getDescription()); retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); } catch (IllegalArgumentException e) { callback.onResult(null, e); return; } connection.commandAsync(database, command, new NoOpFieldNameValidator(), source.getReadPreference(), decoder, - binding, transformingReadCallback(transformer, source, connection, callback)); + operationContext, transformingReadCallback(transformer, source, connection, callback)); } static AsyncCallbackSupplier decorateReadWithRetriesAsync(final RetryState retryState, final OperationContext operationContext, @@ -303,10 +328,12 @@ static AsyncCallbackSupplier decorateWriteWithRetriesAsync(final RetrySta }); } - static CommandWriteTransformerAsync writeConcernErrorTransformerAsync() { + static CommandWriteTransformerAsync writeConcernErrorTransformerAsync(final TimeoutContext timeoutContext) { return (result, connection) -> { assertNotNull(result); - throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), connection.getDescription().getMaxWireVersion()); + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), + timeoutContext); return null; }; } 
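Note: writeConcernErrorTransformer and writeConcernErrorTransformerAsync now take the operation's TimeoutContext, which they pass on to throwOnWriteConcernError. A minimal sketch of the calling pattern used by the operations in this patch; the sketch class is illustrative and assumes it lives in com.mongodb.internal.operation, where these package-private helpers are visible:

    // Sketch only: mirrors the call sites shown in the surrounding hunks.
    package com.mongodb.internal.operation;

    import com.mongodb.MongoNamespace;
    import com.mongodb.internal.async.SingleResultCallback;
    import com.mongodb.internal.binding.AsyncWriteBinding;
    import com.mongodb.internal.connection.AsyncConnection;
    import org.bson.BsonDocument;

    import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync;
    import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync;

    final class WriteConcernTransformerSketch {
        void execute(final AsyncWriteBinding binding, final MongoNamespace namespace, final BsonDocument command,
                     final AsyncConnection connection, final SingleResultCallback<Void> callback) {
            executeCommandAsync(binding, namespace.getDatabaseName(), command, connection,
                    // The transformer is built from the binding's TimeoutContext.
                    writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()),
                    callback);
        }
    }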
@@ -316,9 +343,10 @@ static CommandReadTransformerAsync> asyncS new AsyncSingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, fieldName), 0); } - static AsyncBatchCursor cursorDocumentToAsyncBatchCursor(final BsonDocument cursorDocument, final Decoder decoder, - final BsonValue comment, final AsyncConnectionSource source, final AsyncConnection connection, final int batchSize) { - return new AsyncCommandBatchCursor<>(cursorDocument, batchSize, 0, decoder, comment, source, connection); + static AsyncBatchCursor cursorDocumentToAsyncBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, + final int batchSize, final Decoder decoder, final BsonValue comment, final AsyncConnectionSource source, + final AsyncConnection connection) { + return new AsyncCommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection); } static SingleResultCallback releasingCallback(final SingleResultCallback wrapped, final AsyncConnection connection) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java index c266c135529..77434bd9781 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java @@ -22,6 +22,7 @@ import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.BulkWriteOptions; import com.mongodb.client.model.Collation; import com.mongodb.client.model.CountOptions; @@ -45,6 +46,7 @@ import com.mongodb.client.model.WriteModel; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.FindOptions; @@ -60,18 +62,25 @@ import java.util.List; import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *
<p>This class is not part of the public API and may be removed or changed at any time</p>
      */ public final class AsyncOperations { private final Operations operations; + private final TimeoutSettings timeoutSettings; public AsyncOperations(final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, - final boolean retryWrites, final boolean retryReads) { - this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, + final boolean retryWrites, final boolean retryReads, final TimeoutSettings timeoutSettings) { + WriteConcern writeConcernToUse = writeConcern; + if (timeoutSettings.getTimeoutMS() != null) { + writeConcernToUse = assertNotNull(WriteConcernHelper.cloneWithoutTimeout(writeConcern)); + } + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcernToUse, retryWrites, retryReads); + this.timeoutSettings = timeoutSettings; } public MongoNamespace getNamespace() { @@ -98,6 +107,10 @@ public WriteConcern getWriteConcern() { return operations.getWriteConcern(); } + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + public boolean isRetryWrites() { return operations.isRetryWrites(); } @@ -106,6 +119,44 @@ public boolean isRetryReads() { return operations.isRetryReads(); } + public TimeoutSettings createTimeoutSettings(final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + public AsyncReadOperation countDocuments(final Bson filter, final CountOptions options) { return operations.countDocuments(filter, options); } @@ -130,52 +181,52 @@ public AsyncReadOperation> find(final MongoN } public AsyncReadOperation> distinct(final String fieldName, final Bson filter, - final Class resultClass, final long maxTimeMS, - final Collation collation, final BsonValue comment) { - return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment); + final Class resultClass, final Collation collation, final BsonValue 
comment) { + return operations.distinct(fieldName, filter, resultClass, collation, comment); } - public AsyncExplainableReadOperation> aggregate(final List pipeline, + public AsyncExplainableReadOperation> aggregate( + final List pipeline, final Class resultClass, - final long maxTimeMS, final long maxAwaitTimeMS, - final Integer batchSize, + @Nullable final TimeoutMode timeoutMode, + @Nullable final Integer batchSize, final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { - return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, batchSize, collation, hint, hintString, comment, - variables, allowDiskUse, aggregationLevel); + return operations.aggregate(pipeline, resultClass, timeoutMode, batchSize, collation, hint, hintString, + comment, variables, allowDiskUse, aggregationLevel); } - public AsyncReadOperation aggregateToCollection(final List pipeline, final long maxTimeMS, - final Boolean allowDiskUse, final Boolean bypassDocumentValidation, + public AsyncReadOperation aggregateToCollection(final List pipeline, + @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { - return operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, - comment, variables, aggregationLevel); + return operations.aggregateToCollection(pipeline, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, + hintString, comment, variables, aggregationLevel); } @SuppressWarnings("deprecation") public AsyncWriteOperation mapReduceToCollection(final String databaseName, final String collectionName, final String mapFunction, final String reduceFunction, final String finalizeFunction, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final com.mongodb.client.model.MapReduceAction action, final Boolean bypassDocumentValidation, final Collation collation) { return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, limit, - maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); + jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); } public AsyncReadOperation> mapReduce(final String mapFunction, final String reduceFunction, final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final Collation collation) { - return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, maxTimeMS, jsMode, scope, + return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, jsMode, scope, sort, verbose, collation); } @@ -288,14 +339,9 @@ public AsyncWriteOperation dropSearchIndex(final String indexName) { } public AsyncExplainableReadOperation> listSearchIndexes(final Class resultClass, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable 
final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse) { - return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, batchSize, collation, - comment, allowDiskUse); + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { + return operations.listSearchIndexes(resultClass, indexName, batchSize, collation, comment, allowDiskUse); } public AsyncWriteOperation dropIndex(final String indexName, final DropIndexOptions options) { @@ -306,31 +352,29 @@ public AsyncWriteOperation dropIndex(final Bson keys, final DropIndexOptio return operations.dropIndex(keys, options); } - public AsyncReadOperation> listCollections(final String databaseName, final Class resultClass, - final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, - final Integer batchSize, final long maxTimeMS, - final BsonValue comment) { + public AsyncReadOperation> listCollections(final String databaseName, + final Class resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, + @Nullable final Integer batchSize, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, - batchSize, maxTimeMS, comment); + batchSize, comment, timeoutMode); } public AsyncReadOperation> listDatabases(final Class resultClass, final Bson filter, - final Boolean nameOnly, final long maxTimeMS, - final Boolean authorizedDatabases, final BsonValue comment) { - return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabases, comment); + final Boolean nameOnly, final Boolean authorizedDatabases, final BsonValue comment) { + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabases, comment); } - public AsyncReadOperation> listIndexes(final Class resultClass, final Integer batchSize, - final long maxTimeMS, final BsonValue comment) { - return operations.listIndexes(resultClass, batchSize, maxTimeMS, comment); + public AsyncReadOperation> listIndexes(final Class resultClass, + @Nullable final Integer batchSize, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return operations.listIndexes(resultClass, batchSize, comment, timeoutMode); } public AsyncReadOperation> changeStream(final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel, final Integer batchSize, final Collation collation, - final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, + final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize, - collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java index 
5179d3096b3..e523ee3f389 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java @@ -32,17 +32,13 @@ import org.bson.FieldNameValidator; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableWriteAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; -import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; import static com.mongodb.internal.operation.OperationHelper.validateHintForFindAndModify; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableWrite; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * Abstract base class for findAndModify-based operations @@ -50,7 +46,6 @@ *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      */ public abstract class BaseFindAndModifyOperation implements AsyncWriteOperation, WriteOperation { - private final MongoNamespace namespace; private final WriteConcern writeConcern; private final boolean retryWrites; @@ -59,15 +54,14 @@ public abstract class BaseFindAndModifyOperation implements AsyncWriteOperati private BsonDocument filter; private BsonDocument projection; private BsonDocument sort; - private long maxTimeMS; private Collation collation; private BsonDocument hint; private String hintString; private BsonValue comment; private BsonDocument variables; - protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern, - final boolean retryWrites, final Decoder decoder) { + protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.writeConcern = notNull("writeConcern", writeConcern); this.retryWrites = retryWrites; @@ -77,17 +71,18 @@ protected BaseFindAndModifyOperation(final MongoNamespace namespace, final Write @Override public T execute(final WriteBinding binding) { return executeRetryableWrite(binding, getDatabaseName(), null, getFieldNameValidator(), - CommandResultDocumentCodec.create(getDecoder(), "value"), - getCommandCreator(binding.getSessionContext()), - FindAndModifyHelper.transformer(), - cmd -> cmd); + CommandResultDocumentCodec.create(getDecoder(), "value"), + getCommandCreator(), + FindAndModifyHelper.transformer(), + cmd -> cmd); } @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { executeRetryableWriteAsync(binding, getDatabaseName(), null, getFieldNameValidator(), - CommandResultDocumentCodec.create(getDecoder(), "value"), - getCommandCreator(binding.getSessionContext()), FindAndModifyHelper.asyncTransformer(), cmd -> cmd, callback); + CommandResultDocumentCodec.create(getDecoder(), "value"), + getCommandCreator(), + FindAndModifyHelper.asyncTransformer(), cmd -> cmd, callback); } public MongoNamespace getNamespace() { @@ -124,17 +119,6 @@ public BaseFindAndModifyOperation projection(@Nullable final BsonDocument pro return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, MILLISECONDS); - } - - public BaseFindAndModifyOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public BsonDocument getSort() { return sort; } @@ -196,8 +180,10 @@ public BaseFindAndModifyOperation let(@Nullable final BsonDocument variables) protected abstract void specializeCommand(BsonDocument initialCommand, ConnectionDescription connectionDescription); - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> { + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + SessionContext sessionContext = operationContext.getSessionContext(); + BsonDocument commandDocument = new BsonDocument("findAndModify", new BsonString(getNamespace().getCollectionName())); putIfNotNull(commandDocument, "query", getFilter()); putIfNotNull(commandDocument, "fields", getProjection()); @@ -205,8 +191,8 @@ private CommandCreator getCommandCreator(final SessionContext sessionContext) { 
specializeCommand(commandDocument, connectionDescription); - putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS)); - if (getWriteConcern().isAcknowledged() && !getWriteConcern().isServerDefault() && !sessionContext.hasActiveTransaction()) { + if (getWriteConcern().isAcknowledged() && !getWriteConcern().isServerDefault() + && !sessionContext.hasActiveTransaction()) { commandDocument.put("writeConcern", getWriteConcern().asDocument()); } if (getCollation() != null) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java index 6d6a76885be..f1551da3b2d 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java @@ -33,6 +33,7 @@ import com.mongodb.internal.bulk.WriteRequestWithIndex; import com.mongodb.internal.connection.BulkWriteBatchCombiner; import com.mongodb.internal.connection.IndexMap; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.SplittablePayload; import com.mongodb.internal.session.SessionContext; import com.mongodb.internal.validator.MappedFieldNameValidator; @@ -90,7 +91,7 @@ public final class BulkWriteBatch { private final BsonDocument command; private final SplittablePayload payload; private final List unprocessed; - private final SessionContext sessionContext; + private final OperationContext operationContext; private final BsonValue comment; private final BsonDocument variables; @@ -99,8 +100,9 @@ static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final Boolean bypassDocumentValidation, final boolean retryWrites, final List writeRequests, - final SessionContext sessionContext, + final OperationContext operationContext, @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { + SessionContext sessionContext = operationContext.getSessionContext(); if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !sessionContext.hasActiveTransaction() && !writeConcern.isAcknowledged()) { throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session"); @@ -119,13 +121,13 @@ static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace, } return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, canRetryWrites, new BulkWriteBatchCombiner(connectionDescription.getServerAddress(), ordered, writeConcern), - writeRequestsWithIndex, sessionContext, comment, variables); + writeRequestsWithIndex, operationContext, comment, variables); } private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescription connectionDescription, final boolean ordered, final WriteConcern writeConcern, @Nullable final Boolean bypassDocumentValidation, final boolean retryWrites, final BulkWriteBatchCombiner bulkWriteBatchCombiner, - final List writeRequestsWithIndices, final SessionContext sessionContext, + final List writeRequestsWithIndices, final OperationContext operationContext, @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { this.namespace = namespace; this.connectionDescription = connectionDescription; @@ -159,11 +161,12 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti this.indexMap = indexMap; this.unprocessed = unprocessedItems; this.payload = 
new SplittablePayload(getPayloadType(batchType), payloadItems); - this.sessionContext = sessionContext; + this.operationContext = operationContext; this.comment = comment; this.variables = variables; this.command = new BsonDocument(); + SessionContext sessionContext = operationContext.getSessionContext(); if (!payloadItems.isEmpty()) { command.put(getCommandName(batchType), new BsonString(namespace.getCollectionName())); command.put("ordered", new BsonBoolean(ordered)); @@ -185,7 +188,7 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti final boolean ordered, final WriteConcern writeConcern, final Boolean bypassDocumentValidation, final boolean retryWrites, final BulkWriteBatchCombiner bulkWriteBatchCombiner, final IndexMap indexMap, final WriteRequest.Type batchType, final BsonDocument command, final SplittablePayload payload, - final List unprocessed, final SessionContext sessionContext, + final List unprocessed, final OperationContext operationContext, @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { this.namespace = namespace; this.connectionDescription = connectionDescription; @@ -198,11 +201,11 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti this.payload = payload; this.unprocessed = unprocessed; this.retryWrites = retryWrites; - this.sessionContext = sessionContext; + this.operationContext = operationContext; this.comment = comment; this.variables = variables; if (retryWrites) { - command.put("txnNumber", new BsonInt64(sessionContext.advanceTransactionNumber())); + command.put("txnNumber", new BsonInt64(operationContext.getSessionContext().advanceTransactionNumber())); } this.command = command; } @@ -266,11 +269,11 @@ BulkWriteBatch getNextBatch() { return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites, - bulkWriteBatchCombiner, nextIndexMap, batchType, command, payload.getNextSplit(), unprocessed, sessionContext, + bulkWriteBatchCombiner, nextIndexMap, batchType, command, payload.getNextSplit(), unprocessed, operationContext, comment, variables); } else { return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites, - bulkWriteBatchCombiner, unprocessed, sessionContext, comment, variables); + bulkWriteBatchCombiner, unprocessed, operationContext, comment, variables); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java index a3c134b720c..c4bd72a4775 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java @@ -18,8 +18,10 @@ import com.mongodb.MongoChangeStreamException; import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.ServerCursor; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -37,26 +39,50 @@ import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.isResumableError; import static com.mongodb.internal.operation.SyncOperationHelper.withReadConnectionSource; +/** + * A change stream cursor that wraps {@link CommandBatchCursor} with automatic resumption capabilities in the event + * of timeouts or 
transient errors.
+ * <p>
+ * Upon encountering a resumable error during a {@code hasNext()}, {@code next()}, or {@code tryNext()} call, the
+ * {@link ChangeStreamBatchCursor} attempts to establish a new change stream on the server.
+ * <p>
+ * If an error encountered during any of these method calls is not resumable, it is immediately propagated to the caller, and the
+ * {@link ChangeStreamBatchCursor} is closed and invalidated on the server. Server errors that occur during this invalidation are not
+ * propagated to the caller.
+ * <p>
+ * A {@link MongoOperationTimeoutException} does not invalidate the {@link ChangeStreamBatchCursor}; it is immediately propagated to the
+ * caller. Subsequent method calls attempt to resume by establishing a new change stream on the server, without issuing another
+ * {@code getMore} request first.
      + */ final class ChangeStreamBatchCursor implements AggregateResponseBatchCursor { private final ReadBinding binding; private final ChangeStreamOperation changeStreamOperation; private final int maxWireVersion; - + private final TimeoutContext timeoutContext; private CommandBatchCursor wrapped; private BsonDocument resumeToken; private final AtomicBoolean closed; + /** + * This flag is used to manage change stream resumption logic after a timeout error. + * Indicates whether the last {@code hasNext()}, {@code next()}, or {@code tryNext()} call resulted in a {@link MongoOperationTimeoutException}. + * If {@code true}, indicates a timeout occurred, prompting an attempt to resume the change stream on the subsequent call. + */ + private boolean lastOperationTimedOut; + ChangeStreamBatchCursor(final ChangeStreamOperation changeStreamOperation, final CommandBatchCursor wrapped, final ReadBinding binding, @Nullable final BsonDocument resumeToken, final int maxWireVersion) { + this.timeoutContext = binding.getOperationContext().getTimeoutContext(); this.changeStreamOperation = changeStreamOperation; this.binding = binding.retain(); this.wrapped = wrapped; this.resumeToken = resumeToken; this.maxWireVersion = maxWireVersion; closed = new AtomicBoolean(); + lastOperationTimedOut = false; } CommandBatchCursor getWrapped() { @@ -107,6 +133,7 @@ public List tryNext() { @Override public void close() { if (!closed.getAndSet(true)) { + timeoutContext.resetTimeoutIfPresent(); wrapped.close(); binding.release(); } @@ -184,22 +211,50 @@ static List convertAndProduceLastId(final List rawDocume } R resumeableOperation(final Function, R> function) { + timeoutContext.resetTimeoutIfPresent(); + try { + R result = execute(function); + lastOperationTimedOut = false; + return result; + } catch (Throwable exception) { + lastOperationTimedOut = isTimeoutException(exception); + throw exception; + } + } + + private R execute(final Function, R> function) { + boolean shouldBeResumed = hasPreviousNextTimedOut(); while (true) { + if (shouldBeResumed) { + resumeChangeStream(); + } try { return function.apply(wrapped); } catch (Throwable t) { if (!isResumableError(t, maxWireVersion)) { throw MongoException.fromThrowableNonNull(t); } + shouldBeResumed = true; } - wrapped.close(); - - withReadConnectionSource(binding, source -> { - changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, source.getServerDescription().getMaxWireVersion()); - return null; - }); - wrapped = ((ChangeStreamBatchCursor) changeStreamOperation.execute(binding)).getWrapped(); - binding.release(); // release the new change stream batch cursor's reference to the binding } } + + private void resumeChangeStream() { + wrapped.close(); + + withReadConnectionSource(binding, source -> { + changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, source.getServerDescription().getMaxWireVersion()); + return null; + }); + wrapped = ((ChangeStreamBatchCursor) changeStreamOperation.execute(binding)).getWrapped(); + binding.release(); // release the new change stream batch cursor's reference to the binding + } + + private boolean hasPreviousNextTimedOut() { + return lastOperationTimedOut && !closed.get(); + } + + private static boolean isTimeoutException(final Throwable exception) { + return exception instanceof MongoOperationTimeoutException; + } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java index 
148c988fe48..7cfdd474dda 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java @@ -22,6 +22,7 @@ import com.mongodb.MongoException; import com.mongodb.MongoInterruptedException; import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSocketException; import com.mongodb.internal.VisibleForTesting; @@ -39,7 +40,8 @@ final class ChangeStreamBatchCursorHelper { static final String RESUMABLE_CHANGE_STREAM_ERROR_LABEL = "ResumableChangeStreamError"; static boolean isResumableError(final Throwable t, final int maxWireVersion) { - if (!(t instanceof MongoException) || (t instanceof MongoChangeStreamException) || (t instanceof MongoInterruptedException)) { + if (!(t instanceof MongoException) || (t instanceof MongoChangeStreamException) || (t instanceof MongoInterruptedException) + || (t instanceof MongoOperationTimeoutException)) { return false; } else if (t instanceof MongoNotPrimaryException || t instanceof MongoCursorNotFoundException || t instanceof MongoSocketException | t instanceof MongoClientException) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java index 8df093a6e9a..6231e98de12 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java @@ -16,10 +16,12 @@ package com.mongodb.internal.operation; +import com.mongodb.CursorType; import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -39,10 +41,10 @@ import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.cursor.TimeoutMode.CURSOR_LIFETIME; /** * An operation that executes an {@code $changeStream} aggregation. 
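
[Editorial note, not part of the patch] The ChangeStreamBatchCursor javadoc above states that a MongoOperationTimeoutException is surfaced to the caller while the cursor remains usable, and that the next call resumes the change stream instead of issuing another getMore; the helper change above makes that exception non-resumable so the driver does not retry internally. A minimal caller-side sketch of how this is expected to look from the synchronous API, assuming the client-level timeout(...) builder option introduced by this series (collection names and the timeout value are placeholders):

    // Illustrative only: assumes MongoClientSettings.Builder#timeout(long, TimeUnit) from this series.
    import com.mongodb.MongoClientSettings;
    import com.mongodb.MongoOperationTimeoutException;
    import com.mongodb.client.MongoChangeStreamCursor;
    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoClients;
    import com.mongodb.client.model.changestream.ChangeStreamDocument;
    import org.bson.Document;

    import java.util.concurrent.TimeUnit;

    public final class ChangeStreamTimeoutExample {
        public static void main(final String[] args) {
            MongoClientSettings settings = MongoClientSettings.builder()
                    .timeout(5, TimeUnit.SECONDS)   // client-wide timeoutMS (assumed CSOT option)
                    .build();
            try (MongoClient client = MongoClients.create(settings);
                 MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor =
                         client.getDatabase("test").getCollection("events").watch().cursor()) {
                while (true) {
                    try {
                        ChangeStreamDocument<Document> change = cursor.tryNext();
                        if (change != null) {
                            System.out.println(change.getFullDocument());
                        }
                    } catch (MongoOperationTimeoutException e) {
                        // The timeout is propagated, but the cursor stays usable: the next
                        // tryNext()/next() call resumes the change stream on the server rather
                        // than issuing another getMore (see ChangeStreamBatchCursor above).
                    }
                }
            }
        }
    }
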
@@ -69,10 +71,10 @@ public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument } public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument, - final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, - final Decoder decoder, final ChangeStreamLevel changeStreamLevel) { - this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, - getAggregateTarget(), getPipelineCreator()); + final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, + final ChangeStreamLevel changeStreamLevel) { + this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, getAggregateTarget(), + getPipelineCreator()).cursorType(CursorType.TailableAwait); this.fullDocument = notNull("fullDocument", fullDocument); this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange); this.decoder = notNull("decoder", decoder); @@ -122,15 +124,6 @@ public ChangeStreamOperation batchSize(@Nullable final Integer batchSize) { return this; } - public long getMaxAwaitTime(final TimeUnit timeUnit) { - return wrapped.getMaxAwaitTime(timeUnit); - } - - public ChangeStreamOperation maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - wrapped.maxAwaitTime(maxAwaitTime, timeUnit); - return this; - } - public Collation getCollation() { return wrapped.getCollation(); } @@ -177,9 +170,34 @@ public ChangeStreamOperation showExpandedEvents(final boolean showExpandedEve return this; } + /** + * Gets an aggregate operation with consideration for timeout settings. + *
<p>
+ * Change streams act similarly to tailable awaitData cursors, with identical timeoutMS option behavior. Key distinctions include:
+ * - The timeoutMS option must be applied at the start of the aggregate operation for change streams.
+ * - Change streams support resumption on next() calls. The driver handles automatic resumption for transient errors.
+ * </p>
      + * + * As a result, when {@code timeoutContext.hasTimeoutMS()} the CURSOR_LIFETIME setting is utilized to manage the underlying cursor's + * lifespan in change streams. + * + * @param timeoutContext + * @return An AggregateOperationImpl + */ + private AggregateOperationImpl getAggregateOperation(final TimeoutContext timeoutContext) { + if (timeoutContext.hasTimeoutMS()) { + return wrapped.timeoutMode(CURSOR_LIFETIME); + } + return wrapped; + } + @Override public BatchCursor execute(final ReadBinding binding) { - CommandBatchCursor cursor = (CommandBatchCursor) wrapped.execute(binding); + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + CommandBatchCursor cursor = (CommandBatchCursor) getAggregateOperation(timeoutContext).execute(binding); + cursor.setCloseWithoutTimeoutReset(true); + return new ChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()); @@ -187,11 +205,14 @@ public BatchCursor execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - wrapped.executeAsync(binding, (result, t) -> { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + getAggregateOperation(timeoutContext).executeAsync(binding, (result, t) -> { if (t != null) { callback.onResult(null, t); } else { AsyncCommandBatchCursor cursor = (AsyncCommandBatchCursor) assertNotNull(result); + cursor.setCloseWithoutTimeoutReset(true); + callback.onResult(new AsyncChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()), null); diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java index f71cce0527b..410098db2c0 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java @@ -19,16 +19,20 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoException; import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSocketException; import com.mongodb.ReadPreference; import com.mongodb.ServerAddress; import com.mongodb.ServerCursor; import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerType; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonTimestamp; @@ -57,7 +61,6 @@ class CommandBatchCursor implements AggregateResponseBatchCursor { private final MongoNamespace namespace; - private final long maxTimeMS; private final Decoder decoder; @Nullable private final BsonValue comment; @@ -71,6 +74,7 @@ class CommandBatchCursor implements AggregateResponseBatchCursor { private List nextBatch; CommandBatchCursor( + final 
TimeoutMode timeoutMode, final BsonDocument commandCursorDocument, final int batchSize, final long maxTimeMS, final Decoder decoder, @@ -81,14 +85,16 @@ class CommandBatchCursor implements AggregateResponseBatchCursor { this.commandCursorResult = toCommandCursorResult(connectionDescription.getServerAddress(), FIRST_BATCH, commandCursorDocument); this.namespace = commandCursorResult.getNamespace(); this.batchSize = batchSize; - this.maxTimeMS = maxTimeMS; this.decoder = decoder; this.comment = comment; this.maxWireVersion = connectionDescription.getMaxWireVersion(); this.firstBatchEmpty = commandCursorResult.getResults().isEmpty(); + connectionSource.getOperationContext().getTimeoutContext().setMaxTimeOverride(maxTimeMS); + Connection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER ? connection : null; - resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor()); + resourceManager = new ResourceManager(timeoutMode, namespace, connectionSource, connectionToPin, + commandCursorResult.getServerCursor()); } @Override @@ -101,6 +107,7 @@ private boolean doHasNext() { return true; } + resourceManager.checkTimeoutModeAndResetTimeoutContextIfIteration(); while (resourceManager.getServerCursor() != null) { getMore(); if (!resourceManager.operable()) { @@ -229,12 +236,11 @@ private void getMore() { this.commandCursorResult = toCommandCursorResult(connection.getDescription().getServerAddress(), NEXT_BATCH, assertNotNull( connection.command(namespace.getDatabaseName(), - getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, - maxTimeMS, comment), + getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment), NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), CommandResultDocumentCodec.create(decoder, NEXT_BATCH), - assertNotNull(resourceManager.getConnectionSource())))); + assertNotNull(resourceManager.getConnectionSource()).getOperationContext()))); nextServerCursor = commandCursorResult.getServerCursor(); } catch (MongoCommandException e) { throw translateCommandException(e, serverCursor); @@ -252,15 +258,27 @@ private CommandCursorResult toCommandCursorResult(final ServerAddress serverA return commandCursorResult; } + /** + * Configures the cursor's behavior to close without resetting its timeout. If {@code true}, the cursor attempts to close immediately + * without resetting its {@link TimeoutContext#getTimeout()} if present. This is useful when managing the cursor's close behavior externally. 
+ * + * @param closeWithoutTimeoutReset + */ + void setCloseWithoutTimeoutReset(final boolean closeWithoutTimeoutReset) { + this.resourceManager.setCloseWithoutTimeoutReset(closeWithoutTimeoutReset); + } + @ThreadSafe private static final class ResourceManager extends CursorResourceManager { ResourceManager( + final TimeoutMode timeoutMode, final MongoNamespace namespace, final ConnectionSource connectionSource, @Nullable final Connection connectionToPin, @Nullable final ServerCursor serverCursor) { - super(namespace, connectionSource, connectionToPin, serverCursor); + super(connectionSource.getOperationContext().getTimeoutContext(), timeoutMode, namespace, connectionSource, connectionToPin, + serverCursor); } /** @@ -291,6 +309,7 @@ void doClose() { if (isSkipReleasingServerResourcesOnClose()) { unsetServerCursor(); } + resetTimeout(); try { if (getServerCursor() != null) { Connection connection = getConnection(); @@ -316,6 +335,12 @@ void executeWithConnection(final Consumer action) { } catch (MongoSocketException e) { onCorruptedConnection(connection, e); throw e; + } catch (MongoOperationTimeoutException e) { + Throwable cause = e.getCause(); + if (cause instanceof MongoSocketException) { + onCorruptedConnection(connection, (MongoSocketException) cause); + } + throw e; } finally { connection.release(); } @@ -344,9 +369,12 @@ private void releaseServerResources(final Connection connection) { private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor, final Connection localConnection) { + OperationContext operationContext = assertNotNull(getConnectionSource()).getOperationContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + timeoutContext.resetToDefaultMaxTime(); + localConnection.command(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), - assertNotNull(getConnectionSource())); + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), operationContext); } } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java index eaf03c68ec3..cd7d2468e7f 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java @@ -51,16 +51,13 @@ final class CommandBatchCursorHelper { static BsonDocument getMoreCommandDocument( final long cursorId, final ConnectionDescription connectionDescription, final MongoNamespace namespace, final int batchSize, - final long maxTimeMS, @Nullable final BsonValue comment) { + @Nullable final BsonValue comment) { BsonDocument document = new BsonDocument("getMore", new BsonInt64(cursorId)) .append("collection", new BsonString(namespace.getCollectionName())); if (batchSize != 0) { document.append("batchSize", new BsonInt32(batchSize)); } - if (maxTimeMS != 0) { - document.append("maxTimeMS", new BsonInt64(maxTimeMS)); - } if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { putIfNotNull(document, "comment", comment); } @@ -76,7 +73,7 @@ static CommandCursorResult logCommandCursorResult(final CommandCursorResu } static BsonDocument getKillCursorsCommand(final MongoNamespace namespace, final ServerCursor serverCursor) { - return new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) + return new 
BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java index 3f47ba06f89..4c428131853 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java @@ -28,6 +28,7 @@ import com.mongodb.assertions.Assertions; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.function.RetryState; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; @@ -47,9 +48,11 @@ @SuppressWarnings("overloads") final class CommandOperationHelper { - interface CommandCreator { - BsonDocument create(ServerDescription serverDescription, ConnectionDescription connectionDescription); + BsonDocument create( + OperationContext operationContext, + ServerDescription serverDescription, + ConnectionDescription connectionDescription); } static BinaryOperator onRetryableReadAttemptFailure(final OperationContext operationContext) { @@ -96,8 +99,11 @@ private static Throwable chooseRetryableWriteException( /* Read Binding Helpers */ - static RetryState initialRetryState(final boolean retry) { - return new RetryState(retry ? RetryState.RETRIES : 0); + static RetryState initialRetryState(final boolean retry, final TimeoutContext timeoutContext) { + if (retry) { + return RetryState.withRetryableState(RetryState.RETRIES, timeoutContext); + } + return RetryState.withNonRetryableState(); } private static final List RETRYABLE_ERROR_CODES = asList(6, 7, 89, 91, 189, 262, 9001, 13436, 13435, 11602, 11600, 10107); diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java index 47b807f91ec..ea89dfb303e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java @@ -34,27 +34,28 @@ */ public class CommandReadOperation implements AsyncReadOperation, ReadOperation { private final String databaseName; - private final BsonDocument command; + private final CommandCreator commandCreator; private final Decoder decoder; public CommandReadOperation(final String databaseName, final BsonDocument command, final Decoder decoder) { + this(databaseName, (operationContext, serverDescription, connectionDescription) -> command, decoder); + } + + public CommandReadOperation(final String databaseName, final CommandCreator commandCreator, final Decoder decoder) { this.databaseName = notNull("databaseName", databaseName); - this.command = notNull("command", command); + this.commandCreator = notNull("commandCreator", commandCreator); this.decoder = notNull("decoder", decoder); } @Override public T execute(final ReadBinding binding) { - return executeRetryableRead(binding, databaseName, getCommandCreator(), decoder, (result, source, connection) -> result, false); + return executeRetryableRead(binding, databaseName, commandCreator, decoder, + (result, source, connection) -> result, false); } @Override public void executeAsync(final 
AsyncReadBinding binding, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, databaseName, getCommandCreator(), decoder, (result, source, connection) -> result, - false, callback); - } - - private CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> command; + executeRetryableReadAsync(binding, databaseName, commandCreator, decoder, + (result, source, connection) -> result, false, callback); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java index 92779bc61ae..6c2338d47de 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java @@ -25,20 +25,16 @@ import com.mongodb.MongoTimeoutException; import com.mongodb.MongoWriteConcernException; import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; -import org.bson.BsonInt32; -import org.bson.BsonInt64; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; -import static com.mongodb.assertions.Assertions.isTrueArgument; -import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.CommandOperationHelper.RETRYABLE_WRITE_ERROR_LABEL; import static java.util.Arrays.asList; @@ -52,7 +48,6 @@ public class CommitTransactionOperation extends TransactionOperation { private final boolean alreadyCommitted; private BsonDocument recoveryToken; - private Long maxCommitTimeMS; public CommitTransactionOperation(final WriteConcern writeConcern) { this(writeConcern, false); @@ -68,26 +63,6 @@ public CommitTransactionOperation recoveryToken(@Nullable final BsonDocument rec return this; } - public CommitTransactionOperation maxCommitTime(@Nullable final Long maxCommitTime, final TimeUnit timeUnit) { - if (maxCommitTime == null) { - this.maxCommitTimeMS = null; - } else { - notNull("timeUnit", timeUnit); - isTrueArgument("maxCommitTime > 0", maxCommitTime > 0); - this.maxCommitTimeMS = MILLISECONDS.convert(maxCommitTime, timeUnit); - } - return this; - } - - @Nullable - public Long getMaxCommitTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - if (maxCommitTimeMS == null) { - return null; - } - return timeUnit.convert(maxCommitTimeMS, MILLISECONDS); - } - @Override public Void execute(final WriteBinding binding) { try { @@ -143,29 +118,29 @@ protected String getCommandName() { @Override CommandCreator getCommandCreator() { - CommandCreator creator = (serverDescription, connectionDescription) -> { - BsonDocument command = CommitTransactionOperation.super.getCommandCreator().create(serverDescription, - connectionDescription); - if (maxCommitTimeMS != null) { - command.append("maxTimeMS", - maxCommitTimeMS > Integer.MAX_VALUE - ? 
new BsonInt64(maxCommitTimeMS) : new BsonInt32(maxCommitTimeMS.intValue())); - } + CommandCreator creator = (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = CommitTransactionOperation.super.getCommandCreator() + .create(operationContext, serverDescription, connectionDescription); + operationContext.getTimeoutContext().setMaxTimeOverrideToMaxCommitTime(); return command; }; if (alreadyCommitted) { - return (serverDescription, connectionDescription) -> getRetryCommandModifier().apply(creator.create(serverDescription, connectionDescription)); + return (operationContext, serverDescription, connectionDescription) -> + getRetryCommandModifier(operationContext.getTimeoutContext()) + .apply(creator.create(operationContext, serverDescription, connectionDescription)); } else if (recoveryToken != null) { - return (serverDescription, connectionDescription) -> creator.create(serverDescription, connectionDescription).append("recoveryToken", recoveryToken); + return (operationContext, serverDescription, connectionDescription) -> + creator.create(operationContext, serverDescription, connectionDescription) + .append("recoveryToken", recoveryToken); } return creator; } @Override - protected Function getRetryCommandModifier() { + protected Function getRetryCommandModifier(final TimeoutContext timeoutContext) { return command -> { WriteConcern retryWriteConcern = getWriteConcern().withW("majority"); - if (retryWriteConcern.getWTimeout(MILLISECONDS) == null) { + if (retryWriteConcern.getWTimeout(MILLISECONDS) == null && !timeoutContext.hasTimeoutMS()) { retryWriteConcern = retryWriteConcern.withWTimeout(10000, MILLISECONDS); } command.put("writeConcern", retryWriteConcern.asDocument()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java index 5cdb974b7c0..1095dd44508 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java @@ -31,7 +31,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; @@ -47,13 +46,13 @@ public class CountDocumentsOperation implements AsyncReadOperation, ReadOp private BsonValue comment; private long skip; private long limit; - private long maxTimeMS; private Collation collation; public CountDocumentsOperation(final MongoNamespace namespace) { this.namespace = notNull("namespace", namespace); } + @Nullable public BsonDocument getFilter() { return filter; } @@ -72,6 +71,7 @@ public boolean getRetryReads() { return retryReads; } + @Nullable public BsonValue getHint() { return hint; } @@ -99,17 +99,7 @@ public CountDocumentsOperation skip(final long skip) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public CountDocumentsOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - + @Nullable public Collation getCollation() { return collation; } @@ -131,8 +121,9 @@ public CountDocumentsOperation comment(@Nullable final BsonValue comment) { @Override public Long execute(final ReadBinding binding) { - BatchCursor cursor = getAggregateOperation().execute(binding); - 
return cursor.hasNext() ? getCountFromAggregateResults(cursor.next()) : 0; + try (BatchCursor cursor = getAggregateOperation().execute(binding)) { + return cursor.hasNext() ? getCountFromAggregateResults(cursor.next()) : 0; + } } @Override @@ -157,8 +148,7 @@ private AggregateOperation getAggregateOperation() { .retryReads(retryReads) .collation(collation) .comment(comment) - .hint(hint) - .maxTime(maxTimeMS, TimeUnit.MILLISECONDS); + .hint(hint); } private List getPipeline() { @@ -175,7 +165,7 @@ private List getPipeline() { return pipeline; } - private Long getCountFromAggregateResults(final List results) { + private Long getCountFromAggregateResults(@Nullable final List results) { if (results == null || results.isEmpty()) { return 0L; } else { diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java index 43298bae4bf..f9aa0a8eaa2 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java @@ -18,11 +18,9 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonString; @@ -30,8 +28,6 @@ import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; @@ -53,7 +49,6 @@ public class CountOperation implements AsyncReadOperation, ReadOperation callback) { - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), DECODER, - asyncTransformer(), retryReads, callback); + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), DECODER, asyncTransformer(), retryReads, callback); } private CommandReadTransformer transformer() { @@ -145,24 +129,21 @@ private CommandReadTransformerAsync asyncTransformer() { return (result, source, connection) -> (result.getNumber("n")).longValue(); } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription); - } - - private BsonDocument getCommand(final SessionContext sessionContext, final ConnectionDescription connectionDescription) { - BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName())); + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName())); - appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), document); - putIfNotNull(document, "query", filter); - putIfNotZero(document, "limit", limit); - putIfNotZero(document, "skip", skip); - 
putIfNotNull(document, "hint", hint); - putIfNotZero(document, "maxTimeMS", maxTimeMS); + putIfNotNull(document, "query", filter); + putIfNotZero(document, "limit", limit); + putIfNotZero(document, "skip", skip); + putIfNotNull(document, "hint", hint); - if (collation != null) { - document.put("collation", collation.asDocument()); - } - return document; + if (collation != null) { + document.put("collation", collation.asDocument()); + } + return document; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java index c78fee6838e..d9a11d20287 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java @@ -92,10 +92,6 @@ public class CreateCollectionOperation implements AsyncWriteOperation, Wri private String clusteredIndexName; private BsonDocument encryptedFields; - public CreateCollectionOperation(final String databaseName, final String collectionName) { - this(databaseName, collectionName, null); - } - public CreateCollectionOperation(final String databaseName, final String collectionName, @Nullable final WriteConcern writeConcern) { this.databaseName = notNull("databaseName", databaseName); this.collectionName = notNull("collectionName", collectionName); @@ -241,7 +237,7 @@ public Void execute(final WriteBinding binding) { checkEncryptedFieldsSupported(connection.getDescription()); getCommandFunctions().forEach(commandCreator -> executeCommand(binding, databaseName, commandCreator.get(), connection, - writeConcernErrorTransformer()) + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())) ); return null; }); @@ -425,7 +421,7 @@ public void onResult(@Nullable final Void result, @Nullable final Throwable t) { finalCallback.onResult(null, null); } else { executeCommandAsync(binding, databaseName, nextCommandFunction.get(), - connection, writeConcernErrorTransformerAsync(), this); + connection, writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), this); } } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java index f3aae267b62..76de0757ff1 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java @@ -25,7 +25,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; import com.mongodb.WriteConcernResult; -import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -44,19 +43,12 @@ import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertNotNull; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; -import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; -import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; import static 
com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; -import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static com.mongodb.internal.operation.IndexHelper.generateIndexName; -import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; -import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; @@ -69,13 +61,8 @@ public class CreateIndexesOperation implements AsyncWriteOperation, WriteO private final MongoNamespace namespace; private final List requests; private final WriteConcern writeConcern; - private long maxTimeMS; private CreateIndexCommitQuorum commitQuorum; - public CreateIndexesOperation(final MongoNamespace namespace, final List requests) { - this(namespace, requests, null); - } - public CreateIndexesOperation(final MongoNamespace namespace, final List requests, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); @@ -103,18 +90,6 @@ public List getIndexNames() { return indexNames; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public CreateIndexesOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public CreateIndexCommitQuorum getCommitQuorum() { return commitQuorum; } @@ -126,34 +101,25 @@ public CreateIndexesOperation commitQuorum(@Nullable final CreateIndexCommitQuor @Override public Void execute(final WriteBinding binding) { - return withConnection(binding, connection -> { - try { - executeCommand(binding, namespace.getDatabaseName(), getCommand(connection.getDescription()), - connection, writeConcernErrorTransformer()); - } catch (MongoCommandException e) { - throw checkForDuplicateKeyError(e); - } - return null; - }); + try { + return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformer( + binding.getOperationContext().getTimeoutContext())); + } catch (MongoCommandException e) { + throw checkForDuplicateKeyError(e); + } } @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { - withAsyncConnection(binding, (connection, t) -> { - SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); - if (t != null) { - errHandlingCallback.onResult(null, t); - } else { - SingleResultCallback wrappedCallback = releasingCallback(errHandlingCallback, connection); - try { - executeCommandAsync(binding, namespace.getDatabaseName(), - getCommand(connection.getDescription()), connection, writeConcernErrorTransformerAsync(), - (result, t12) -> wrappedCallback.onResult(null, translateException(t12))); - } catch (Throwable t1) { - wrappedCallback.onResult(null, t1); - } - } - }); + executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformerAsync(binding + .getOperationContext().getTimeoutContext()), + ((result, t) -> { + if (t != null) { + 
callback.onResult(null, translateException(t)); + } else { + callback.onResult(result, null); + } + })); } @SuppressWarnings("deprecation") @@ -221,24 +187,25 @@ private BsonDocument getIndex(final IndexRequest request) { return index; } - private BsonDocument getCommand(final ConnectionDescription description) { - BsonDocument command = new BsonDocument("createIndexes", new BsonString(namespace.getCollectionName())); - List values = new ArrayList<>(); - for (IndexRequest request : requests) { - values.add(getIndex(request)); - } - command.put("indexes", new BsonArray(values)); - putIfNotZero(command, "maxTimeMS", maxTimeMS); - appendWriteConcernToCommand(writeConcern, command); - if (commitQuorum != null) { - if (serverIsAtLeastVersionFourDotFour(description)) { - command.put("commitQuorum", commitQuorum.toBsonValue()); - } else { - throw new MongoClientException("Specifying a value for the create index commit quorum option " - + "requires a minimum MongoDB version of 4.4"); + private CommandOperationHelper.CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument("createIndexes", new BsonString(namespace.getCollectionName())); + List values = new ArrayList<>(); + for (IndexRequest request : requests) { + values.add(getIndex(request)); } - } - return command; + command.put("indexes", new BsonArray(values)); + appendWriteConcernToCommand(writeConcern, command); + if (commitQuorum != null) { + if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { + command.put("commitQuorum", commitQuorum.toBsonValue()); + } else { + throw new MongoClientException("Specifying a value for the create index commit quorum option " + + "requires a minimum MongoDB version of 4.4"); + } + } + return command; + }; } @Nullable diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java index 8d1e98de6b8..3636db08593 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java @@ -55,7 +55,7 @@ public class CreateViewOperation implements AsyncWriteOperation, WriteOper private Collation collation; public CreateViewOperation(final String databaseName, final String viewName, final String viewOn, final List pipeline, - final WriteConcern writeConcern) { + final WriteConcern writeConcern) { this.databaseName = notNull("databaseName", databaseName); this.viewName = notNull("viewName", viewName); this.viewOn = notNull("viewOn", viewOn); @@ -127,7 +127,7 @@ public CreateViewOperation collation(@Nullable final Collation collation) { public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { executeCommand(binding, databaseName, getCommand(), new BsonDocumentCodec(), - writeConcernErrorTransformer()); + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())); return null; }); } @@ -140,7 +140,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall errHandlingCallback.onResult(null, t); } else { SingleResultCallback wrappedCallback = releasingCallback(errHandlingCallback, connection); - executeCommandAsync(binding, databaseName, getCommand(), connection, writeConcernErrorTransformerAsync(), + executeCommandAsync(binding, databaseName, getCommand(), connection, + 
writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), wrappedCallback); } }); diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java index 7aeaad49118..78529cfda44 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java @@ -20,6 +20,8 @@ import com.mongodb.MongoSocketException; import com.mongodb.ServerCursor; import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.internal.connection.Connection; import com.mongodb.lang.Nullable; @@ -54,6 +56,8 @@ @ThreadSafe abstract class CursorResourceManager { private final Lock lock; + private final TimeoutContext timeoutContext; + private final TimeoutMode timeoutMode; private final MongoNamespace namespace; private volatile State state; @Nullable @@ -63,13 +67,18 @@ abstract class CursorResourceManager implements AsyncReadOperation>, ReadOperation> { private static final String VALUES = "values"; - private final MongoNamespace namespace; private final String fieldName; private final Decoder decoder; private boolean retryReads; private BsonDocument filter; - private long maxTimeMS; private Collation collation; private BsonValue comment; @@ -86,17 +79,6 @@ public boolean getRetryReads() { return retryReads; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public DistinctOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public Collation getCollation() { return collation; } @@ -117,34 +99,32 @@ public DistinctOperation comment(final BsonValue comment) { @Override public BatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - createCommandDecoder(), singleBatchCursorTransformer(VALUES), retryReads); + return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), + singleBatchCursorTransformer(VALUES), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - createCommandDecoder(), asyncSingleBatchCursorTransformer(VALUES), retryReads, errorHandlingCallback(callback, LOGGER)); + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), createCommandDecoder(), asyncSingleBatchCursorTransformer(VALUES), retryReads, + errorHandlingCallback(callback, LOGGER)); } private Codec createCommandDecoder() { return CommandResultDocumentCodec.create(decoder, VALUES); } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription); - } - - private BsonDocument getCommand(final SessionContext sessionContext, final ConnectionDescription connectionDescription) { - BsonDocument commandDocument = new BsonDocument("distinct", new 
BsonString(namespace.getCollectionName())); - appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), commandDocument); - commandDocument.put("key", new BsonString(fieldName)); - putIfNotNull(commandDocument, "query", filter); - putIfNotZero(commandDocument, "maxTimeMS", maxTimeMS); - if (collation != null) { - commandDocument.put("collation", collation.asDocument()); - } - putIfNotNull(commandDocument, "comment", comment); - return commandDocument; + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument("distinct", new BsonString(namespace.getCollectionName())); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), commandDocument); + commandDocument.put("key", new BsonString(fieldName)); + putIfNotNull(commandDocument, "query", filter); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java index d0e73948339..46a66fcf28e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java @@ -59,6 +59,12 @@ static void putIfNotNull(final BsonDocument command, final String key, @Nullable } } + static void putIfNotNull(final BsonDocument command, final String key, @Nullable final Boolean value) { + if (value != null) { + command.put(key, new BsonBoolean(value)); + } + } + static void putIfNotZero(final BsonDocument command, final String key, final int value) { if (value != 0) { command.put(key, new BsonInt32(value)); diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java index 6ddc087bdee..d879f83e542 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java @@ -18,6 +18,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.WriteConcern; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadWriteBinding; @@ -66,10 +67,6 @@ public class DropCollectionOperation implements AsyncWriteOperation, Write private BsonDocument encryptedFields; private boolean autoEncryptedFields; - public DropCollectionOperation(final MongoNamespace namespace) { - this(namespace, null); - } - public DropCollectionOperation(final MongoNamespace namespace, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.writeConcern = writeConcern; @@ -96,7 +93,7 @@ public Void execute(final WriteBinding binding) { getCommands(localEncryptedFields).forEach(command -> { try { executeCommand(binding, namespace.getDatabaseName(), command.get(), - connection, writeConcernErrorTransformer()); + connection, writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())); } catch (MongoCommandException e) { rethrowIfNotNamespaceError(e); } @@ -251,8 +248,12 @@ public void onResult(@Nullable final Void result, @Nullable 
final Throwable t) { if (nextCommandFunction == null) { finalCallback.onResult(null, null); } else { - executeCommandAsync(binding, namespace.getDatabaseName(), nextCommandFunction.get(), - connection, writeConcernErrorTransformerAsync(), this); + try { + executeCommandAsync(binding, namespace.getDatabaseName(), nextCommandFunction.get(), + connection, writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), this); + } catch (MongoOperationTimeoutException operationTimeoutException) { + finalCallback.onResult(null, operationTimeoutException); + } } } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java index 2dad7dda177..9dd942cb726 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java @@ -46,10 +46,6 @@ public class DropDatabaseOperation implements AsyncWriteOperation, WriteOp private final String databaseName; private final WriteConcern writeConcern; - public DropDatabaseOperation(final String databaseName) { - this(databaseName, null); - } - public DropDatabaseOperation(final String databaseName, @Nullable final WriteConcern writeConcern) { this.databaseName = notNull("databaseName", databaseName); this.writeConcern = writeConcern; @@ -62,7 +58,8 @@ public WriteConcern getWriteConcern() { @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { - executeCommand(binding, databaseName, getCommand(), connection, writeConcernErrorTransformer()); + executeCommand(binding, databaseName, getCommand(), connection, writeConcernErrorTransformer(binding.getOperationContext() + .getTimeoutContext())); return null; }); } @@ -75,7 +72,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall errHandlingCallback.onResult(null, t); } else { executeCommandAsync(binding, databaseName, getCommand(), connection, - writeConcernErrorTransformerAsync(), releasingCallback(errHandlingCallback, connection)); + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), + releasingCallback(errHandlingCallback, connection)); } }); diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java index 66bb8f408fb..e66a4e10bbf 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java @@ -26,21 +26,12 @@ import org.bson.BsonDocument; import org.bson.BsonString; -import java.util.concurrent.TimeUnit; - -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; -import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; -import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; import static 
com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; -import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; -import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; -import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; @@ -54,15 +45,6 @@ public class DropIndexOperation implements AsyncWriteOperation, WriteOpera private final String indexName; private final BsonDocument indexKeys; private final WriteConcern writeConcern; - private long maxTimeMS; - - public DropIndexOperation(final MongoNamespace namespace, final String indexName) { - this(namespace, indexName, null); - } - - public DropIndexOperation(final MongoNamespace namespace, final BsonDocument keys) { - this(namespace, keys, null); - } public DropIndexOperation(final MongoNamespace namespace, final String indexName, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); @@ -82,61 +64,40 @@ public WriteConcern getWriteConcern() { return writeConcern; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public DropIndexOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - @Override public Void execute(final WriteBinding binding) { - return withConnection(binding, connection -> { - try { - executeCommand(binding, namespace.getDatabaseName(), getCommand(), connection, - writeConcernErrorTransformer()); - } catch (MongoCommandException e) { - rethrowIfNotNamespaceError(e); - } - return null; - }); + try { + executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformer(binding + .getOperationContext() + .getTimeoutContext())); + } catch (MongoCommandException e) { + rethrowIfNotNamespaceError(e); + } + return null; } @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { - withAsyncConnection(binding, (connection, t) -> { - SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); - if (t != null) { - errHandlingCallback.onResult(null, t); + executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), (result, t) -> { + if (t != null && !isNamespaceError(t)) { + callback.onResult(null, t); } else { - SingleResultCallback releasingCallback = releasingCallback(errHandlingCallback, connection); - executeCommandAsync(binding, namespace.getDatabaseName(), getCommand(), - connection, writeConcernErrorTransformerAsync(), (result, t1) -> { - if (t1 != null && !isNamespaceError(t1)) { - releasingCallback.onResult(null, t1); - } else { - releasingCallback.onResult(result, null); - } - }); + callback.onResult(null, null); } }); } - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("dropIndexes", new BsonString(namespace.getCollectionName())); - if (indexName != null) { - command.put("index", new BsonString(indexName)); - } else { 
- command.put("index", indexKeys); - } - - putIfNotZero(command, "maxTimeMS", maxTimeMS); - appendWriteConcernToCommand(writeConcern, command); - return command; + private CommandOperationHelper.CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument("dropIndexes", new BsonString(namespace.getCollectionName())); + if (indexName != null) { + command.put("index", new BsonString(indexName)); + } else { + command.put("index", indexKeys); + } + appendWriteConcernToCommand(writeConcern, command); + return command; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java index 571de884582..17f7e617405 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java @@ -22,7 +22,6 @@ import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonString; @@ -30,8 +29,6 @@ import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -39,7 +36,6 @@ import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; -import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; @@ -52,7 +48,6 @@ public class EstimatedDocumentCountOperation implements AsyncReadOperation private static final Decoder DECODER = new BsonDocumentCodec(); private final MongoNamespace namespace; private boolean retryReads; - private long maxTimeMS; private BsonValue comment; public EstimatedDocumentCountOperation(final MongoNamespace namespace) { @@ -64,12 +59,6 @@ public EstimatedDocumentCountOperation retryReads(final boolean retryReads) { return this; } - public EstimatedDocumentCountOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - @Nullable public BsonValue getComment() { return comment; @@ -83,8 +72,9 @@ public EstimatedDocumentCountOperation comment(@Nullable final BsonValue comment @Override public Long execute(final ReadBinding binding) { try { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), transformer(), retryReads); + return executeRetryableRead(binding, namespace.getDatabaseName(), + 
getCommandCreator(), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), + transformer(), retryReads); } catch (MongoCommandException e) { return assertNotNull(rethrowIfNotNamespaceError(e, 0L)); } @@ -92,9 +82,10 @@ public Long execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), asyncTransformer(), retryReads, - (result, t) -> { + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), + asyncTransformer(), retryReads, + (result, t) -> { if (isNamespaceError(t)) { callback.onResult(0L, null); } else { @@ -115,11 +106,10 @@ private long transformResult(final BsonDocument result, final ConnectionDescript return (result.getNumber("n")).longValue(); } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> { + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName())); - appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document); - putIfNotZero(document, "maxTimeMS", maxTimeMS); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), document); if (comment != null) { document.put("comment", comment); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java index 928173ba2fb..c284b942fe4 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java @@ -28,8 +28,6 @@ import org.bson.FieldNameValidator; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - /** * An operation that atomically finds and deletes a single document. 
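// Illustrative sketch, not a verbatim excerpt from this patch: the recurring change in the hunks above is that
// each operation's eagerly built getCommand(...) and its per-operation maxTimeMS field are replaced by a lazily
// evaluated CommandCreator whose lambda also receives the OperationContext, so time limits are taken from the
// shared TimeoutContext rather than appended to every command document. The method below is a minimal sketch:
// "exampleCommand" is a placeholder command name, and it assumes it lives inside one of these operation classes,
// with the namespace and writeConcern fields and the appendWriteConcernToCommand static import already in scope.
private CommandOperationHelper.CommandCreator exampleCommandCreator() {
    return (operationContext, serverDescription, connectionDescription) -> {
        BsonDocument command = new BsonDocument("exampleCommand", new BsonString(namespace.getCollectionName()));
        // Note the absence of putIfNotZero(command, "maxTimeMS", ...): after this refactor the timeout is
        // expected to be supplied from operationContext.getTimeoutContext() when the command is sent.
        appendWriteConcernToCommand(writeConcern, command);
        return command;
    };
}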
* @@ -38,7 +36,7 @@ public class FindAndDeleteOperation extends BaseFindAndModifyOperation { public FindAndDeleteOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder) { + final Decoder decoder) { super(namespace, writeConcern, retryWrites, decoder); } @@ -54,12 +52,6 @@ public FindAndDeleteOperation projection(@Nullable final BsonDocument project return this; } - @Override - public FindAndDeleteOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - super.maxTime(maxTime, timeUnit); - return this; - } - @Override public FindAndDeleteOperation sort(@Nullable final BsonDocument sort) { super.sort(sort); diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java index 303d9c0e208..3c143fdde36 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java @@ -32,7 +32,6 @@ import java.util.HashMap; import java.util.Map; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; @@ -49,7 +48,7 @@ public class FindAndReplaceOperation extends BaseFindAndModifyOperation { private Boolean bypassDocumentValidation; public FindAndReplaceOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder, final BsonDocument replacement) { + final Decoder decoder, final BsonDocument replacement) { super(namespace, writeConcern, retryWrites, decoder); this.replacement = notNull("replacement", replacement); } @@ -97,12 +96,6 @@ public FindAndReplaceOperation projection(@Nullable final BsonDocument projec return this; } - @Override - public FindAndReplaceOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - super.maxTime(maxTime, timeUnit); - return this; - } - @Override public FindAndReplaceOperation sort(@Nullable final BsonDocument sort) { super.sort(sort); diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java index 2c2a00ff437..46e1994985c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java @@ -34,7 +34,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; @@ -53,15 +52,15 @@ public class FindAndUpdateOperation extends BaseFindAndModifyOperation { private Boolean bypassDocumentValidation; private List arrayFilters; - public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder, final BsonDocument update) { + public FindAndUpdateOperation(final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final BsonDocument update) { super(namespace, writeConcern, retryWrites, decoder); this.update = notNull("update", update); this.updatePipeline = null; } public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, 
final boolean retryWrites, - final Decoder decoder, final List update) { + final Decoder decoder, final List update) { super(namespace, writeConcern, retryWrites, decoder); this.updatePipeline = update; this.update = null; @@ -125,12 +124,6 @@ public FindAndUpdateOperation projection(@Nullable final BsonDocument project return this; } - @Override - public FindAndUpdateOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - super.maxTime(maxTime, timeUnit); - return this; - } - @Override public FindAndUpdateOperation sort(@Nullable final BsonDocument sort) { super.sort(sort); diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java index fa5aa9af1be..514e48b4db8 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java @@ -21,6 +21,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.MongoQueryException; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -28,21 +29,17 @@ import com.mongodb.internal.async.function.RetryState; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.connection.NoOpSessionContext; -import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.lang.Nullable; import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.BsonValue; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -56,6 +53,7 @@ import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; @@ -78,8 +76,6 @@ public class FindOperation implements AsyncExplainableReadOperation implements AsyncExplainableReadOperation decoder) { this.namespace = notNull("namespace", namespace); @@ -144,30 +141,6 @@ public FindOperation projection(@Nullable final BsonDocument projection) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public FindOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - - 
public long getMaxAwaitTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS); - } - - public FindOperation maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxAwaitTime >= 0", maxAwaitTime >= 0); - this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); - return this; - } - public int getSkip() { return skip; } @@ -195,6 +168,13 @@ public FindOperation cursorType(final CursorType cursorType) { return this; } + public FindOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + public boolean isNoCursorTimeout() { return noCursorTimeout; } @@ -305,14 +285,19 @@ public FindOperation allowDiskUse(@Nullable final Boolean allowDiskUse) { @Override public BatchCursor execute(final ReadBinding binding) { - RetryState retryState = initialRetryState(retryReads); + IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException(); + if (invalidTimeoutModeException != null) { + throw invalidTimeoutModeException; + } + + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); Supplier> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { - retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); try { - return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(), - getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIRST_BATCH), - transformer(), connection); + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIRST_BATCH), + transformer(), connection); } catch (MongoCommandException e) { throw new MongoQueryException(e.getResponse(), e.getServerAddress()); } @@ -321,22 +306,28 @@ public BatchCursor execute(final ReadBinding binding) { return read.get(); } - @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - RetryState retryState = initialRetryState(retryReads); + IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException(); + if (invalidTimeoutModeException != null) { + callback.onResult(null, invalidTimeoutModeException); + return; + } + + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); binding.retain(); AsyncCallbackSupplier> asyncRead = decorateReadWithRetriesAsync( retryState, binding.getOperationContext(), (AsyncCallbackSupplier>) funcCallback -> withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback, (source, connection, releasingCallback) -> { if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), - binding.getSessionContext()), releasingCallback)) { + binding.getOperationContext()), releasingCallback)) { return; } SingleResultCallback> wrappedCallback = exceptionTransformingCallback(releasingCallback); - createReadCommandAndExecuteAsync(retryState, binding, source, 
namespace.getDatabaseName(), - getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIRST_BATCH), + createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, + namespace.getDatabaseName(), getCommandCreator(), + CommandResultDocumentCodec.create(decoder, FIRST_BATCH), asyncTransformer(), connection, wrappedCallback); }) ).whenComplete(binding::release); @@ -362,23 +353,25 @@ private static SingleResultCallback exceptionTransformingCallback(final S @Override public ReadOperation asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), - resultDecoder); + return createExplainableOperation(verbosity, resultDecoder); } @Override public AsyncReadOperation asAsyncExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return createExplainableOperation(verbosity, resultDecoder); + } + + CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), - resultDecoder); + (operationContext, serverDescription, connectionDescription) -> + asExplainCommand(getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder); } - private BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) { + private BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) { BsonDocument commandDocument = new BsonDocument("find", new BsonString(namespace.getCollectionName())); - appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument); + appendReadConcernToCommand(operationContext.getSessionContext(), maxWireVersion, commandDocument); putIfNotNull(commandDocument, "filter", filter); putIfNotNullOrEmpty(commandDocument, "sort", sort); @@ -399,15 +392,17 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m if (limit < 0 || batchSize < 0) { commandDocument.put("singleBatch", BsonBoolean.TRUE); } - if (maxTimeMS > 0) { - commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } if (isTailableCursor()) { commandDocument.put("tailable", BsonBoolean.TRUE); + if (isAwaitData()) { + commandDocument.put("awaitData", BsonBoolean.TRUE); + } else { + operationContext.getTimeoutContext().setMaxTimeOverride(0L); + } + } else { + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); } - if (isAwaitData()) { - commandDocument.put("awaitData", BsonBoolean.TRUE); - } + if (noCursorTimeout) { commandDocument.put("noCursorTimeout", BsonBoolean.TRUE); } @@ -444,8 +439,9 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m return commandDocument; } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion()); + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> + getCommand(operationContext, connectionDescription.getMaxWireVersion()); } private boolean isTailableCursor() { @@ -456,17 +452,36 @@ private boolean isAwaitData() { return cursorType == 
CursorType.TailableAwait; } - private CommandReadTransformer> transformer() { - return (result, source, connection) -> - new CommandBatchCursor<>(result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection); + private TimeoutMode getTimeoutMode() { + if (timeoutMode == null) { + return isTailableCursor() ? TimeoutMode.ITERATION : TimeoutMode.CURSOR_LIFETIME; + } + return timeoutMode; } - private long getMaxTimeForCursor() { - return cursorType == CursorType.TailableAwait ? maxAwaitTimeMS : 0; + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + new CommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder, + comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { return (result, source, connection) -> - new AsyncCommandBatchCursor<>(result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection); + new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder, + comment, source, connection); + } + + private long getMaxTimeForCursor(final OperationContext operationContext) { + return cursorType == CursorType.TailableAwait ? operationContext.getTimeoutContext().getMaxAwaitTimeMS() : 0; + } + + @Nullable + private IllegalStateException invalidTimeoutModeException() { + if (isTailableCursor()) { + if (timeoutMode == TimeoutMode.CURSOR_LIFETIME) { + return new IllegalStateException("Tailable cursors only support the ITERATION value for the timeoutMode option."); + } + } + return null; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java index 5883d68ae18..73abe905aea 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java @@ -17,7 +17,7 @@ package com.mongodb.internal.operation; import com.mongodb.MongoCommandException; -import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -26,15 +26,12 @@ import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; -import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonValue; import org.bson.codecs.Codec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.notNull; @@ -46,6 +43,7 @@ import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; @@ -54,6 +52,7 @@ import 
static com.mongodb.internal.operation.DocumentHelper.putIfTrue; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute; @@ -76,10 +75,10 @@ public class ListCollectionsOperation implements AsyncReadOperation decoder) { this.databaseName = notNull("databaseName", databaseName); @@ -113,17 +112,6 @@ public ListCollectionsOperation batchSize(final int batchSize) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public ListCollectionsOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public ListCollectionsOperation retryReads(final boolean retryReads) { this.retryReads = retryReads; return this; @@ -157,15 +145,27 @@ public boolean isAuthorizedCollections() { return authorizedCollections; } + + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public ListCollectionsOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + @Override public BatchCursor execute(final ReadBinding binding) { - RetryState retryState = initialRetryState(retryReads); + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); Supplier> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { - retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); try { - return createReadCommandAndExecute(retryState, binding, source, databaseName, getCommandCreator(), - createCommandDecoder(), commandTransformer(), connection); + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, databaseName, + getCommandCreator(), createCommandDecoder(), transformer(), connection); } catch (MongoCommandException e) { return rethrowIfNotNamespaceError(e, createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize)); @@ -177,18 +177,19 @@ public BatchCursor execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - RetryState retryState = initialRetryState(retryReads); + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); binding.retain(); AsyncCallbackSupplier> asyncRead = decorateReadWithRetriesAsync( retryState, binding.getOperationContext(), (AsyncCallbackSupplier>) funcCallback -> withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback, (source, connection, releasingCallback) -> { if (retryState.breakAndCompleteIfRetryAnd(() -> 
!canRetryRead(source.getServerDescription(), - binding.getSessionContext()), releasingCallback)) { + binding.getOperationContext()), releasingCallback)) { return; } - createReadCommandAndExecuteAsync(retryState, binding, source, databaseName, getCommandCreator(), createCommandDecoder(), - asyncTransformer(), connection, (result, t) -> { + createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, databaseName, + getCommandCreator(), createCommandDecoder(), asyncTransformer(), connection, + (result, t) -> { if (t != null && !isNamespaceError(t)) { releasingCallback.onResult(null, t); } else { @@ -201,37 +202,28 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb asyncRead.get(errorHandlingCallback(callback, LOGGER)); } - private MongoNamespace createNamespace() { - return new MongoNamespace(databaseName, "$cmd.listCollections"); + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + cursorDocumentToBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result, decoder, comment, source, connection, batchSize); - } - - private CommandReadTransformer> commandTransformer() { - return (result, source, connection) -> cursorDocumentToBatchCursor(result, decoder, comment, source, connection, batchSize); - } - - private CommandOperationHelper.CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> getCommand(); - } - - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("listCollections", new BsonInt32(1)) - .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize)); - if (filter != null) { - command.append("filter", filter); - } - if (nameOnly) { - command.append("nameOnly", BsonBoolean.TRUE); - } - putIfTrue(command, "authorizedCollections", authorizedCollections); - if (maxTimeMS > 0) { - command.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - putIfNotNull(command, "comment", comment); - return command; + return (result, source, connection) -> + cursorDocumentToAsyncBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); + } + + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument("listCollections", new BsonInt32(1)) + .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? 
null : batchSize)); + putIfNotNull(commandDocument, "filter", filter); + putIfTrue(commandDocument, "nameOnly", nameOnly); + putIfTrue(commandDocument, "authorizedCollections", authorizedCollections); + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; } private Codec createCommandDecoder() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java index fec689c938f..5f61c9192dd 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java @@ -16,21 +16,16 @@ package com.mongodb.internal.operation; - import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; -import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonValue; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer; @@ -48,13 +43,9 @@ *

 * <p>This class is not part of the public API and may be removed or changed at any time</p>
      */ public class ListDatabasesOperation implements AsyncReadOperation>, ReadOperation> { - private static final String DATABASES = "databases"; - private final Decoder decoder; private boolean retryReads; - - private long maxTimeMS; private BsonDocument filter; private Boolean nameOnly; private Boolean authorizedDatabasesOnly; @@ -64,17 +55,6 @@ public ListDatabasesOperation(final Decoder decoder) { this.decoder = notNull("decoder", decoder); } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public ListDatabasesOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public ListDatabasesOperation filter(@Nullable final BsonDocument filter) { this.filter = filter; return this; @@ -123,38 +103,24 @@ public ListDatabasesOperation comment(@Nullable final BsonValue comment) { @Override public BatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, "admin", getCommandCreator(), - CommandResultDocumentCodec.create(decoder, DATABASES), + return executeRetryableRead(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES), singleBatchCursorTransformer(DATABASES), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - executeRetryableReadAsync(binding, "admin", getCommandCreator(), - CommandResultDocumentCodec.create(decoder, DATABASES), - asyncSingleBatchCursorTransformer(DATABASES), retryReads, - errorHandlingCallback(callback, LOGGER)); + executeRetryableReadAsync(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES), + asyncSingleBatchCursorTransformer(DATABASES), retryReads, errorHandlingCallback(callback, LOGGER)); } private CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> getCommand(); - } - - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("listDatabases", new BsonInt32(1)); - if (maxTimeMS > 0) { - command.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - if (filter != null) { - command.put("filter", filter); - } - if (nameOnly != null) { - command.put("nameOnly", new BsonBoolean(nameOnly)); - } - if (authorizedDatabasesOnly != null) { - command.put("authorizedDatabases", new BsonBoolean(authorizedDatabasesOnly)); - } - putIfNotNull(command, "comment", comment); - return command; + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument("listDatabases", new BsonInt32(1)); + putIfNotNull(commandDocument, "filter", filter); + putIfNotNull(commandDocument, "nameOnly", nameOnly); + putIfNotNull(commandDocument, "authorizedDatabases", authorizedDatabasesOnly); + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java index e4d0138121d..e540f752dbc 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java @@ -18,6 +18,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; +import 
com.mongodb.client.cursor.TimeoutMode; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; @@ -26,13 +27,11 @@ import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; -import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.BsonValue; import org.bson.codecs.Codec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.notNull; @@ -50,6 +49,7 @@ import static com.mongodb.internal.operation.CursorHelper.getCursorDocumentFromBatchSize; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; @@ -69,8 +69,8 @@ public class ListIndexesOperation implements AsyncReadOperation decoder; private boolean retryReads; private int batchSize; - private long maxTimeMS; private BsonValue comment; + private TimeoutMode timeoutMode = TimeoutMode.CURSOR_LIFETIME; public ListIndexesOperation(final MongoNamespace namespace, final Decoder decoder) { this.namespace = notNull("namespace", namespace); @@ -86,17 +86,6 @@ public ListIndexesOperation batchSize(final int batchSize) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public ListIndexesOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public ListIndexesOperation retryReads(final boolean retryReads) { this.retryReads = retryReads; return this; @@ -116,15 +105,26 @@ public ListIndexesOperation comment(@Nullable final BsonValue comment) { return this; } + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public ListIndexesOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + @Override public BatchCursor execute(final ReadBinding binding) { - RetryState retryState = initialRetryState(retryReads); + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); Supplier> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { - retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); try { - return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(), - createCommandDecoder(), transformer(), connection); + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, namespace.getDatabaseName(), + getCommandCreator(), createCommandDecoder(), transformer(), 
connection); } catch (MongoCommandException e) { return rethrowIfNotNamespaceError(e, createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize)); @@ -136,18 +136,20 @@ public BatchCursor execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - RetryState retryState = initialRetryState(retryReads); + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); binding.retain(); AsyncCallbackSupplier> asyncRead = decorateReadWithRetriesAsync( retryState, binding.getOperationContext(), (AsyncCallbackSupplier>) funcCallback -> withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback, (source, connection, releasingCallback) -> { if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), - binding.getSessionContext()), releasingCallback)) { + binding.getOperationContext()), releasingCallback)) { return; } - createReadCommandAndExecuteAsync(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(), - createCommandDecoder(), asyncTransformer(), connection, (result, t) -> { + createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, + namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), + asyncTransformer(), connection, + (result, t) -> { if (t != null && !isNamespaceError(t)) { releasingCallback.onResult(null, t); } else { @@ -162,25 +164,23 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb private CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> getCommand(); - } - - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("listIndexes", new BsonString(namespace.getCollectionName())) - .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize)); - if (maxTimeMS > 0) { - command.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - putIfNotNull(command, "comment", comment); - return command; + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument("listIndexes", new BsonString(namespace.getCollectionName())) + .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? 
null : batchSize)); + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; } private CommandReadTransformer> transformer() { - return (result, source, connection) -> cursorDocumentToBatchCursor(result, decoder, comment, source, connection, batchSize); + return (result, source, connection) -> + cursorDocumentToBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result, decoder, comment, source, connection, batchSize); + return (result, source, connection) -> + cursorDocumentToAsyncBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); } private Codec createCommandDecoder() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java index 74313059099..0f9a81dbf19 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java @@ -31,12 +31,11 @@ import org.bson.BsonValue; import org.bson.codecs.Decoder; -import java.util.Collections; -import java.util.concurrent.TimeUnit; - import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; +import static java.util.Collections.singletonList; + /** * An operation that lists Alas Search indexes with the help of {@value #STAGE_LIST_SEARCH_INDEXES} pipeline stage. 
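// Illustrative sketch, not a verbatim excerpt from this patch: the cursor-returning operations above now carry a
// TimeoutMode and hand it to the batch-cursor transformers in place of a local maxTimeMS. A minimal shape of that
// pattern is shown below; it assumes it sits inside one of these generic operation classes (type parameter T, plus
// timeoutMode, batchSize, decoder and comment fields and the cursorDocumentToBatchCursor static import in scope).
private TimeoutMode resolveTimeoutMode(final boolean tailable) {
    // Default mirrors FindOperation: tailable cursors are timed per iteration, others over the cursor lifetime.
    return timeoutMode != null ? timeoutMode : (tailable ? TimeoutMode.ITERATION : TimeoutMode.CURSOR_LIFETIME);
}

private CommandReadTransformer<BsonDocument, BatchCursor<T>> exampleTransformer() {
    return (result, source, connection) ->
            cursorDocumentToBatchCursor(resolveTimeoutMode(false), result, batchSize, decoder, comment, source, connection);
}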
@@ -56,26 +55,18 @@ final class ListSearchIndexesOperation private final Collation collation; @Nullable private final BsonValue comment; - private final long maxTimeMS; @Nullable private final String indexName; private final boolean retryReads; - ListSearchIndexesOperation(final MongoNamespace namespace, - final Decoder decoder, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse, - final boolean retryReads) { + ListSearchIndexesOperation(final MongoNamespace namespace, final Decoder decoder, @Nullable final String indexName, + @Nullable final Integer batchSize, @Nullable final Collation collation, @Nullable final BsonValue comment, + @Nullable final Boolean allowDiskUse, final boolean retryReads) { this.namespace = namespace; this.decoder = decoder; this.allowDiskUse = allowDiskUse; this.batchSize = batchSize; this.collation = collation; - this.maxTimeMS = maxTimeMS; this.comment = comment; this.indexName = indexName; this.retryReads = retryReads; @@ -122,14 +113,12 @@ public AsyncReadOperation asAsyncExplainableOperation(@Nullable final Exp private AggregateOperation asAggregateOperation() { BsonDocument searchDefinition = getSearchDefinition(); BsonDocument listSearchIndexesStage = new BsonDocument(STAGE_LIST_SEARCH_INDEXES, searchDefinition); - - return new AggregateOperation<>(namespace, Collections.singletonList(listSearchIndexesStage), decoder) + return new AggregateOperation<>(namespace, singletonList(listSearchIndexesStage), decoder) .retryReads(retryReads) .collation(collation) .comment(comment) .allowDiskUse(allowDiskUse) - .batchSize(batchSize) - .maxTime(maxTimeMS, TimeUnit.MILLISECONDS); + .batchSize(batchSize); } @NonNull diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java index 18546027c05..b93be56d6f2 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java @@ -20,7 +20,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; -import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -32,27 +32,21 @@ import org.bson.codecs.BsonDocumentCodec; import java.util.List; -import java.util.concurrent.TimeUnit; -import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandWriteTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; -import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; -import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static 
com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; -import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.SyncOperationHelper.CommandWriteTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; -import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * Operation that runs a Map Reduce against a MongoDB instance. This operation does not support "inline" results, i.e. the results will @@ -63,8 +57,7 @@ * *

 * <p>This class is not part of the public API and may be removed or changed at any time</p>
      */ -public class -MapReduceToCollectionOperation implements AsyncWriteOperation, WriteOperation { +public class MapReduceToCollectionOperation implements AsyncWriteOperation, WriteOperation { private final MongoNamespace namespace; private final BsonJavaScript mapFunction; private final BsonJavaScript reduceFunction; @@ -77,7 +70,6 @@ private int limit; private boolean jsMode; private boolean verbose; - private long maxTimeMS; private String action = "replace"; private String databaseName; private Boolean bypassDocumentValidation; @@ -85,13 +77,7 @@ private static final List VALID_ACTIONS = asList("replace", "merge", "reduce"); public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, - final BsonJavaScript reduceFunction, final String collectionName) { - this(namespace, mapFunction, reduceFunction, collectionName, null); - } - - public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, - final BsonJavaScript reduceFunction, @Nullable final String collectionName, - @Nullable final WriteConcern writeConcern) { + final BsonJavaScript reduceFunction, @Nullable final String collectionName, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.mapFunction = notNull("mapFunction", mapFunction); this.reduceFunction = notNull("reduceFunction", reduceFunction); @@ -182,17 +168,6 @@ public MapReduceToCollectionOperation verbose(final boolean verbose) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, MILLISECONDS); - } - - public MapReduceToCollectionOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public String getAction() { return action; } @@ -234,23 +209,16 @@ public MapReduceToCollectionOperation collation(@Nullable final Collation collat @Override public MapReduceStatistics execute(final WriteBinding binding) { - return withConnection(binding, connection -> assertNotNull(executeCommand(binding, namespace.getDatabaseName(), - getCommand(connection.getDescription()), connection, transformer()))); + return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), transformer(binding + .getOperationContext() + .getTimeoutContext())); } @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { - withAsyncConnection(binding, (connection, t) -> { - SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); - if (t != null) { - errHandlingCallback.onResult(null, t); - } else { - executeCommandAsync(binding, namespace.getDatabaseName(), - getCommand(connection.getDescription()), connection, transformerAsync(), - releasingCallback(errHandlingCallback, connection)); - - } - }); + executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), transformerAsync(binding + .getOperationContext() + .getTimeoutContext()), callback); } /** @@ -274,54 +242,56 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai } private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { - return new CommandReadOperation<>(namespace.getDatabaseName(), - ExplainHelper.asExplainCommand(getCommand(null), explainVerbosity), - new BsonDocumentCodec()); + return new 
CommandReadOperation<>(getNamespace().getDatabaseName(), + (operationContext, serverDescription, connectionDescription) -> + asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription), + explainVerbosity), new BsonDocumentCodec()); } - private CommandWriteTransformer transformer() { + private CommandWriteTransformer transformer(final TimeoutContext timeoutContext) { return (result, connection) -> { throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), - connection.getDescription().getMaxWireVersion()); + connection.getDescription().getMaxWireVersion(), timeoutContext); return MapReduceHelper.createStatistics(result); }; } - private CommandWriteTransformerAsync transformerAsync() { + private CommandWriteTransformerAsync transformerAsync(final TimeoutContext timeoutContext) { return (result, connection) -> { throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), - connection.getDescription().getMaxWireVersion()); + connection.getDescription().getMaxWireVersion(), timeoutContext); return MapReduceHelper.createStatistics(result); }; } - private BsonDocument getCommand(@Nullable final ConnectionDescription description) { - BsonDocument outputDocument = new BsonDocument(getAction(), new BsonString(getCollectionName())); - if (getDatabaseName() != null) { - outputDocument.put("db", new BsonString(getDatabaseName())); - } - BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName())) - .append("map", getMapFunction()) - .append("reduce", getReduceFunction()) - .append("out", outputDocument); - - putIfNotNull(commandDocument, "query", getFilter()); - putIfNotNull(commandDocument, "sort", getSort()); - putIfNotNull(commandDocument, "finalize", getFinalizeFunction()); - putIfNotNull(commandDocument, "scope", getScope()); - putIfTrue(commandDocument, "verbose", isVerbose()); - putIfNotZero(commandDocument, "limit", getLimit()); - putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS)); - putIfTrue(commandDocument, "jsMode", isJsMode()); - if (bypassDocumentValidation != null && description != null) { - commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); - } - if (description != null) { + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument outputDocument = new BsonDocument(getAction(), new BsonString(getCollectionName())); + if (getDatabaseName() != null) { + outputDocument.put("db", new BsonString(getDatabaseName())); + } + BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName())) + .append("map", getMapFunction()) + .append("reduce", getReduceFunction()) + .append("out", outputDocument); + + putIfNotNull(commandDocument, "query", getFilter()); + putIfNotNull(commandDocument, "sort", getSort()); + putIfNotNull(commandDocument, "finalize", getFinalizeFunction()); + putIfNotNull(commandDocument, "scope", getScope()); + putIfTrue(commandDocument, "verbose", isVerbose()); + putIfNotZero(commandDocument, "limit", getLimit()); + putIfTrue(commandDocument, "jsMode", isJsMode()); + if (bypassDocumentValidation != null) { + commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); + } appendWriteConcernToCommand(writeConcern, commandDocument); - } - if (collation != null) { - commandDocument.put("collation", collation.asDocument()); - } - 
return commandDocument; + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + return commandDocument; + }; } + } diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java index ff10df61f0e..695053e8845 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java @@ -22,8 +22,6 @@ import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.connection.NoOpSessionContext; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonInt32; @@ -32,8 +30,6 @@ import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -45,10 +41,8 @@ import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; -import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

      Operation that runs a Map Reduce against a MongoDB instance. This operation only supports "inline" results, i.e. the results will be @@ -71,11 +65,10 @@ public class MapReduceWithInlineResultsOperation implements AsyncReadOperatio private int limit; private boolean jsMode; private boolean verbose; - private long maxTimeMS; private Collation collation; public MapReduceWithInlineResultsOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, - final BsonJavaScript reduceFunction, final Decoder decoder) { + final BsonJavaScript reduceFunction, final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.mapFunction = notNull("mapFunction", mapFunction); this.reduceFunction = notNull("reduceFunction", reduceFunction); @@ -170,31 +163,18 @@ public MapReduceWithInlineResultsOperation collation(@Nullable final Collatio return this; } - - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, MILLISECONDS); - } - - - public MapReduceWithInlineResultsOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - - @Override public MapReduceBatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), + return executeRetryableRead(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, "results"), transformer(), false); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(decoder, "results"), + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, "results"), asyncTransformer(), false, errHandlingCallback); } @@ -208,7 +188,8 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { return new CommandReadOperation<>(namespace.getDatabaseName(), - asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), + (operationContext, serverDescription, connectionDescription) -> + asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription), explainVerbosity), new BsonDocumentCodec()); } @@ -226,28 +207,26 @@ private CommandReadTransformerAsync> MapReduceHelper.createStatistics(result)); } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion()); - } - - private BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) { - BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName())) - .append("map", getMapFunction()) - .append("reduce", getReduceFunction()) - .append("out", new BsonDocument("inline", new BsonInt32(1))); - - putIfNotNull(commandDocument, "query", getFilter()); - putIfNotNull(commandDocument, "sort", getSort()); - putIfNotNull(commandDocument, "finalize", 
getFinalizeFunction()); - putIfNotNull(commandDocument, "scope", getScope()); - putIfTrue(commandDocument, "verbose", isVerbose()); - appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument); - putIfNotZero(commandDocument, "limit", getLimit()); - putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS)); - putIfTrue(commandDocument, "jsMode", isJsMode()); - if (collation != null) { - commandDocument.put("collation", collation.asDocument()); - } - return commandDocument; + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + + BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName())) + .append("map", getMapFunction()) + .append("reduce", getReduceFunction()) + .append("out", new BsonDocument("inline", new BsonInt32(1))); + + putIfNotNull(commandDocument, "query", getFilter()); + putIfNotNull(commandDocument, "sort", getSort()); + putIfNotNull(commandDocument, "finalize", getFinalizeFunction()); + putIfNotNull(commandDocument, "scope", getScope()); + putIfTrue(commandDocument, "verbose", isVerbose()); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), commandDocument); + putIfNotZero(commandDocument, "limit", getLimit()); + putIfTrue(commandDocument, "jsMode", isJsMode()); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + return commandDocument; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index fe58fb0bd75..c506bbda2fe 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -22,6 +22,7 @@ import com.mongodb.assertions.Assertions; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackLoop; import com.mongodb.internal.async.function.AsyncCallbackRunnable; @@ -87,10 +88,10 @@ public class MixedBulkWriteOperation implements AsyncWriteOperation writeRequests, - final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) { - this.ordered = ordered; + final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) { this.namespace = notNull("namespace", namespace); this.writeRequests = notNull("writes", writeRequests); + this.ordered = ordered; this.writeConcern = notNull("writeConcern", writeConcern); this.retryWrites = retryWrites; isTrueArgument("writes is not an empty list", !writeRequests.isEmpty()); @@ -176,6 +177,7 @@ private boolean shouldAttemptToRetryWrite(final RetryState retryState, final Thr @Override public BulkWriteResult execute(final WriteBinding binding) { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); /* We cannot use the tracking of attempts built in the `RetryState` class because conceptually we have to maintain multiple attempt * counters while executing a single bulk write operation: * - a counter that limits attempts to select server and checkout a connection before we created a batch; @@ -183,23 +185,23 @@ public BulkWriteResult execute(final WriteBinding binding) { * Fortunately, these 
counters do not exist concurrently with each other. While maintaining the counters manually, * we must adhere to the contract of `RetryingSyncSupplier`. When the retry timeout is implemented, there will be no counters, * and the code related to the attempt tracking in `BulkWriteTracker` will be removed. */ - RetryState retryState = new RetryState(); - BulkWriteTracker.attachNew(retryState, retryWrites); + RetryState retryState = new RetryState(timeoutContext); + BulkWriteTracker.attachNew(retryState, retryWrites, timeoutContext); Supplier retryingBulkWrite = decorateWriteWithRetries(retryState, binding.getOperationContext(), () -> withSourceAndConnection(binding::getWriteConnectionSource, true, (source, connection) -> { ConnectionDescription connectionDescription = connection.getDescription(); // attach `maxWireVersion` ASAP because it is used to check whether we can retry retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true); - SessionContext sessionContext = binding.getSessionContext(); + SessionContext sessionContext = binding.getOperationContext().getSessionContext(); WriteConcern writeConcern = getAppliedWriteConcern(sessionContext); if (!isRetryableWrite(retryWrites, getAppliedWriteConcern(sessionContext), connectionDescription, sessionContext)) { - handleMongoWriteConcernWithResponseException(retryState, true); + handleMongoWriteConcernWithResponseException(retryState, true, timeoutContext); } validateWriteRequests(connectionDescription, bypassDocumentValidation, writeRequests, writeConcern); if (!retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail).batch().isPresent()) { BulkWriteTracker.attachNew(retryState, BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, - bypassDocumentValidation, retryWrites, writeRequests, sessionContext, comment, variables)); + bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext); } return executeBulkWriteBatch(retryState, binding, connection); }) @@ -212,9 +214,10 @@ public BulkWriteResult execute(final WriteBinding binding) { } public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); // see the comment in `execute(WriteBinding)` explaining the manual tracking of attempts - RetryState retryState = new RetryState(); - BulkWriteTracker.attachNew(retryState, retryWrites); + RetryState retryState = new RetryState(timeoutContext); + BulkWriteTracker.attachNew(retryState, retryWrites, timeoutContext); binding.retain(); AsyncCallbackSupplier retryingBulkWrite = this.decorateWriteWithRetries(retryState, binding.getOperationContext(), @@ -224,10 +227,10 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall ConnectionDescription connectionDescription = connection.getDescription(); // attach `maxWireVersion` ASAP because it is used to check whether we can retry retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true); - SessionContext sessionContext = binding.getSessionContext(); + SessionContext sessionContext = binding.getOperationContext().getSessionContext(); WriteConcern writeConcern = getAppliedWriteConcern(sessionContext); if (!isRetryableWrite(retryWrites, getAppliedWriteConcern(sessionContext), connectionDescription, sessionContext) - && 
handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallback)) { + && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallback, timeoutContext)) { return; } if (validateWriteRequestsAndCompleteIfInvalid(connectionDescription, bypassDocumentValidation, writeRequests, @@ -238,7 +241,7 @@ && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallba if (!retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail).batch().isPresent()) { BulkWriteTracker.attachNew(retryState, BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, - bypassDocumentValidation, retryWrites, writeRequests, sessionContext, comment, variables)); + bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext); } } catch (Throwable t) { releasingCallback.onResult(null, t); @@ -255,12 +258,15 @@ private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final .orElseThrow(Assertions::fail); BulkWriteBatch currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail); int maxWireVersion = connection.getDescription().getMaxWireVersion(); + OperationContext operationContext = binding.getOperationContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + while (currentBatch.shouldProcessBatch()) { try { - BsonDocument result = executeCommand(connection, currentBatch, binding); - if (currentBatch.getRetryWrites() && !binding.getSessionContext().hasActiveTransaction()) { + BsonDocument result = executeCommand(operationContext, connection, currentBatch); + if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) { MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result, - connection.getDescription().getServerAddress(), "errMsg"); + connection.getDescription().getServerAddress(), "errMsg", timeoutContext); if (writeConcernBasedError != null) { if (currentBulkWriteTracker.lastAttempt()) { addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion); @@ -271,19 +277,21 @@ private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final } } currentBatch.addResult(result); - currentBulkWriteTracker = BulkWriteTracker.attachNext(retryState, currentBatch); + currentBulkWriteTracker = BulkWriteTracker.attachNext(retryState, currentBatch, timeoutContext); currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail); } catch (MongoException exception) { if (!retryState.isFirstAttempt() && !(exception instanceof MongoWriteConcernWithResponseException)) { addRetryableWriteErrorLabel(exception, maxWireVersion); } - handleMongoWriteConcernWithResponseException(retryState, false); + handleMongoWriteConcernWithResponseException(retryState, false, timeoutContext); throw exception; } } try { return currentBatch.getResult(); } catch (MongoException e) { + /* if we get here, some of the batches failed on the server side, + * so we need to mark the last attempt to avoid retrying. 
*/ retryState.markAsLastAttempt(); throw e; } @@ -301,11 +309,13 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async if (loopState.breakAndCompleteIf(() -> !currentBatch.shouldProcessBatch(), iterationCallback)) { return; } - executeCommandAsync(binding, connection, currentBatch, (result, t) -> { + OperationContext operationContext = binding.getOperationContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + executeCommandAsync(operationContext, connection, currentBatch, (result, t) -> { if (t == null) { - if (currentBatch.getRetryWrites() && !binding.getSessionContext().hasActiveTransaction()) { + if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) { MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result, - connection.getDescription().getServerAddress(), "errMsg"); + connection.getDescription().getServerAddress(), "errMsg", binding.getOperationContext().getTimeoutContext()); if (writeConcernBasedError != null) { if (currentBulkWriteTracker.lastAttempt()) { addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion); @@ -319,7 +329,7 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async } } currentBatch.addResult(result); - BulkWriteTracker.attachNext(retryState, currentBatch); + BulkWriteTracker.attachNext(retryState, currentBatch, timeoutContext); iterationCallback.onResult(null, null); } else { if (t instanceof MongoException) { @@ -327,7 +337,7 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async if (!retryState.isFirstAttempt() && !(exception instanceof MongoWriteConcernWithResponseException)) { addRetryableWriteErrorLabel(exception, maxWireVersion); } - if (handleMongoWriteConcernWithResponseExceptionAsync(retryState, null)) { + if (handleMongoWriteConcernWithResponseExceptionAsync(retryState, null, timeoutContext)) { return; } } @@ -345,6 +355,8 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async .flatMap(BulkWriteTracker::batch).orElseThrow(Assertions::fail).getResult(); } catch (Throwable loopResultT) { if (loopResultT instanceof MongoException) { + /* if we get here, some of the batches failed on the server side, + * so we need to mark the last attempt to avoid retrying. 
*/ retryState.markAsLastAttempt(); } callback.onResult(null, loopResultT); @@ -355,7 +367,9 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async }); } - private void handleMongoWriteConcernWithResponseException(final RetryState retryState, final boolean breakAndThrowIfDifferent) { + private void handleMongoWriteConcernWithResponseException(final RetryState retryState, + final boolean breakAndThrowIfDifferent, + final TimeoutContext timeoutContext) { if (!retryState.isFirstAttempt()) { RuntimeException prospectiveFailedResult = (RuntimeException) retryState.exception().orElse(null); boolean prospectiveResultIsWriteConcernException = prospectiveFailedResult instanceof MongoWriteConcernWithResponseException; @@ -365,14 +379,15 @@ private void handleMongoWriteConcernWithResponseException(final RetryState retry .batch().ifPresent(bulkWriteBatch -> { bulkWriteBatch.addResult( (BsonDocument) ((MongoWriteConcernWithResponseException) prospectiveFailedResult).getResponse()); - BulkWriteTracker.attachNext(retryState, bulkWriteBatch); + BulkWriteTracker.attachNext(retryState, bulkWriteBatch, timeoutContext); }); } } } private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetryState retryState, - @Nullable final SingleResultCallback callback) { + @Nullable final SingleResultCallback callback, + final TimeoutContext timeoutContext) { if (!retryState.isFirstAttempt()) { RuntimeException prospectiveFailedResult = (RuntimeException) retryState.exception().orElse(null); boolean prospectiveResultIsWriteConcernException = prospectiveFailedResult instanceof MongoWriteConcernWithResponseException; @@ -384,7 +399,7 @@ private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetrySta .batch().ifPresent(bulkWriteBatch -> { bulkWriteBatch.addResult( (BsonDocument) ((MongoWriteConcernWithResponseException) prospectiveFailedResult).getResponse()); - BulkWriteTracker.attachNext(retryState, bulkWriteBatch); + BulkWriteTracker.attachNext(retryState, bulkWriteBatch, timeoutContext); }); } } @@ -392,16 +407,17 @@ private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetrySta } @Nullable - private BsonDocument executeCommand(final Connection connection, final BulkWriteBatch batch, final WriteBinding binding) { + private BsonDocument executeCommand(final OperationContext operationContext, final Connection connection, final BulkWriteBatch batch) { return connection.command(namespace.getDatabaseName(), batch.getCommand(), NO_OP_FIELD_NAME_VALIDATOR, null, batch.getDecoder(), - binding, shouldAcknowledge(batch, binding.getSessionContext()), batch.getPayload(), batch.getFieldNameValidator()); + operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()), + batch.getPayload(), batch.getFieldNameValidator()); } - private void executeCommandAsync(final AsyncWriteBinding binding, final AsyncConnection connection, final BulkWriteBatch batch, + private void executeCommandAsync(final OperationContext operationContext, final AsyncConnection connection, final BulkWriteBatch batch, final SingleResultCallback callback) { connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NO_OP_FIELD_NAME_VALIDATOR, null, batch.getDecoder(), - binding, shouldAcknowledge(batch, binding.getSessionContext()), batch.getPayload(), batch.getFieldNameValidator(), - callback); + operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()), + batch.getPayload(), batch.getFieldNameValidator(), callback); } private 
WriteConcern getAppliedWriteConcern(final SessionContext sessionContext) { @@ -427,20 +443,21 @@ private void addErrorLabelsToWriteConcern(final BsonDocument result, final Set diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java index 89a61558e59..e271f23d522 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/Operations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java @@ -21,6 +21,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.BulkWriteOptions; import com.mongodb.client.model.ClusteredIndexOptions; import com.mongodb.client.model.Collation; @@ -86,7 +87,6 @@ import static com.mongodb.assertions.Assertions.notNull; import static java.lang.String.format; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; final class Operations { private final MongoNamespace namespace; @@ -145,12 +145,12 @@ boolean isRetryReads() { } CountDocumentsOperation countDocuments(final Bson filter, final CountOptions options) { - CountDocumentsOperation operation = new CountDocumentsOperation(assertNotNull(namespace)) + CountDocumentsOperation operation = new CountDocumentsOperation( + assertNotNull(namespace)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .skip(options.getSkip()) .limit(options.getLimit()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .collation(options.getCollation()) .comment(options.getComment()); if (options.getHint() != null) { @@ -162,9 +162,9 @@ CountDocumentsOperation countDocuments(final Bson filter, final CountOptions opt } EstimatedDocumentCountOperation estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return new EstimatedDocumentCountOperation(assertNotNull(namespace)) + return new EstimatedDocumentCountOperation( + assertNotNull(namespace)) .retryReads(retryReads) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .comment(options.getComment()); } @@ -185,14 +185,13 @@ FindOperation find(final MongoNamespace findNamespace, @Nulla private FindOperation createFindOperation(final MongoNamespace findNamespace, @Nullable final Bson filter, final Class resultClass, final FindOptions options) { - FindOperation operation = new FindOperation<>(findNamespace, codecRegistry.get(resultClass)) + FindOperation operation = new FindOperation<>( + findNamespace, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(filter == null ? 
new BsonDocument() : filter.toBsonDocument(documentClass, codecRegistry)) .batchSize(options.getBatchSize()) .skip(options.getSkip()) .limit(options.getLimit()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .maxAwaitTime(options.getMaxAwaitTime(MILLISECONDS), MILLISECONDS) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) .cursorType(options.getCursorType()) @@ -205,7 +204,8 @@ private FindOperation createFindOperation(final MongoNamespac .max(toBsonDocument(options.getMax())) .returnKey(options.isReturnKey()) .showRecordId(options.isShowRecordId()) - .allowDiskUse(options.isAllowDiskUse()); + .allowDiskUse(options.isAllowDiskUse()) + .timeoutMode(options.getTimeoutMode()); if (options.getHint() != null) { operation.hint(toBsonDocument(options.getHint())); @@ -215,65 +215,59 @@ private FindOperation createFindOperation(final MongoNamespac return operation; } - DistinctOperation distinct(final String fieldName, @Nullable final Bson filter, - final Class resultClass, final long maxTimeMS, - final Collation collation, final BsonValue comment) { - return new DistinctOperation<>(assertNotNull(namespace), fieldName, codecRegistry.get(resultClass)) + DistinctOperation distinct(final String fieldName, @Nullable final Bson filter, final Class resultClass, + final Collation collation, final BsonValue comment) { + return new DistinctOperation<>(assertNotNull(namespace), + fieldName, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(filter == null ? null : filter.toBsonDocument(documentClass, codecRegistry)) - .maxTime(maxTimeMS, MILLISECONDS) .collation(collation) .comment(comment); - } AggregateOperation aggregate(final List pipeline, final Class resultClass, - final long maxTimeMS, final long maxAwaitTimeMS, @Nullable final Integer batchSize, - final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, - final BsonValue comment, - final Bson variables, final Boolean allowDiskUse, - final AggregationLevel aggregationLevel) { - return new AggregateOperation<>(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)), - codecRegistry.get(resultClass), aggregationLevel) + @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, + final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, + final BsonValue comment, final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { + return new AggregateOperation<>(assertNotNull(namespace), + assertNotNull(toBsonDocumentList(pipeline)), codecRegistry.get(resultClass), aggregationLevel) .retryReads(retryReads) - .maxTime(maxTimeMS, MILLISECONDS) - .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS) .allowDiskUse(allowDiskUse) .batchSize(batchSize) .collation(collation) .hint(hint != null ? toBsonDocument(hint) : (hintString != null ? 
new BsonString(hintString) : null)) .comment(comment) - .let(toBsonDocument(variables)); + .let(toBsonDocument(variables)) + .timeoutMode(timeoutMode); } - AggregateToCollectionOperation aggregateToCollection(final List pipeline, final long maxTimeMS, - final Boolean allowDiskUse, final Boolean bypassDocumentValidation, - final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, - final Bson variables, final AggregationLevel aggregationLevel) { - return new AggregateToCollectionOperation(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)), - readConcern, writeConcern, aggregationLevel) - .maxTime(maxTimeMS, MILLISECONDS) + AggregateToCollectionOperation aggregateToCollection(final List pipeline, @Nullable final TimeoutMode timeoutMode, + final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, @Nullable final Bson hint, + @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { + return new AggregateToCollectionOperation(assertNotNull(namespace), + assertNotNull(toBsonDocumentList(pipeline)), readConcern, writeConcern, aggregationLevel) .allowDiskUse(allowDiskUse) .bypassDocumentValidation(bypassDocumentValidation) .collation(collation) .hint(hint != null ? toBsonDocument(hint) : (hintString != null ? new BsonString(hintString) : null)) .comment(comment) - .let(toBsonDocument(variables)); + .let(toBsonDocument(variables)) + .timeoutMode(timeoutMode); } @SuppressWarnings("deprecation") MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, final String collectionName, final String mapFunction, final String reduceFunction, @Nullable final String finalizeFunction, final Bson filter, - final int limit, final long maxTimeMS, final boolean jsMode, + final int limit, final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final com.mongodb.client.model.MapReduceAction action, final Boolean bypassDocumentValidation, final Collation collation) { - MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation(assertNotNull(namespace), - new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), collectionName, writeConcern) + MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation( + assertNotNull(namespace), new BsonJavaScript(mapFunction), + new BsonJavaScript(reduceFunction), collectionName, writeConcern) .filter(toBsonDocument(filter)) .limit(limit) - .maxTime(maxTimeMS, MILLISECONDS) .jsMode(jsMode) .scope(toBsonDocument(scope)) .sort(toBsonDocument(sort)) @@ -290,20 +284,15 @@ MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, } MapReduceWithInlineResultsOperation mapReduce(final String mapFunction, final String reduceFunction, - @Nullable final String finalizeFunction, - final Class resultClass, - final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, - final Bson sort, final boolean verbose, - final Collation collation) { + @Nullable final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, + final Collation collation) { MapReduceWithInlineResultsOperation operation = - new MapReduceWithInlineResultsOperation<>(assertNotNull(namespace), - new BsonJavaScript(mapFunction), - new BsonJavaScript(reduceFunction), + new 
MapReduceWithInlineResultsOperation<>( + assertNotNull(namespace), new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), codecRegistry.get(resultClass)) .filter(toBsonDocument(filter)) .limit(limit) - .maxTime(maxTimeMS, MILLISECONDS) .jsMode(jsMode) .scope(toBsonDocument(scope)) .sort(toBsonDocument(sort)) @@ -316,11 +305,11 @@ MapReduceWithInlineResultsOperation mapReduce(final String ma } FindAndDeleteOperation findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { - return new FindAndDeleteOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec()) + return new FindAndDeleteOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec()) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .collation(options.getCollation()) .hint(toBsonDocument(options.getHint())) .hintString(options.getHintString()) @@ -330,14 +319,13 @@ FindAndDeleteOperation findOneAndDelete(final Bson filter, final Find FindAndReplaceOperation findOneAndReplace(final Bson filter, final TDocument replacement, final FindOneAndReplaceOptions options) { - return new FindAndReplaceOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(), - documentToBsonDocument(replacement)) + return new FindAndReplaceOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), documentToBsonDocument(replacement)) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .hint(toBsonDocument(options.getHint())) @@ -347,14 +335,13 @@ FindAndReplaceOperation findOneAndReplace(final Bson filter, final TD } FindAndUpdateOperation findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return new FindAndUpdateOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(), - assertNotNull(toBsonDocument(update))) + return new FindAndUpdateOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocument(update))) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .arrayFilters(toBsonDocumentList(options.getArrayFilters())) @@ -366,14 +353,13 @@ FindAndUpdateOperation findOneAndUpdate(final Bson filter, final Bson FindAndUpdateOperation findOneAndUpdate(final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return new FindAndUpdateOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(), - assertNotNull(toBsonDocumentList(update))) + return new FindAndUpdateOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocumentList(update))) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) 
.returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .arrayFilters(toBsonDocumentList(options.getArrayFilters())) @@ -430,8 +416,7 @@ MixedBulkWriteOperation updateMany(final Bson filter, final List .comment(options.getComment()).let(options.getLet())); } - MixedBulkWriteOperation insertMany(final List documents, - final InsertManyOptions options) { + MixedBulkWriteOperation insertMany(final List documents, final InsertManyOptions options) { notNull("documents", documents); List requests = new ArrayList<>(documents.size()); for (TDocument document : documents) { @@ -444,13 +429,14 @@ MixedBulkWriteOperation insertMany(final List documents, requests.add(new InsertRequest(documentToBsonDocument(document))); } - return new MixedBulkWriteOperation(assertNotNull(namespace), requests, options.isOrdered(), writeConcern, retryWrites) - .bypassDocumentValidation(options.getBypassDocumentValidation()).comment(options.getComment()); + return new MixedBulkWriteOperation(assertNotNull(namespace), + requests, options.isOrdered(), writeConcern, retryWrites) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()); } @SuppressWarnings("unchecked") - MixedBulkWriteOperation bulkWrite(final List> requests, - final BulkWriteOptions options) { + MixedBulkWriteOperation bulkWrite(final List> requests, final BulkWriteOptions options) { notNull("requests", requests); List writeRequests = new ArrayList<>(requests.size()); for (WriteModel writeModel : requests) { @@ -465,9 +451,8 @@ MixedBulkWriteOperation bulkWrite(final List replaceOneModel = (ReplaceOneModel) writeModel; - writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())), documentToBsonDocument(replaceOneModel - .getReplacement()), - WriteRequest.Type.REPLACE) + writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())), + documentToBsonDocument(replaceOneModel.getReplacement()), WriteRequest.Type.REPLACE) .upsert(replaceOneModel.getReplaceOptions().isUpsert()) .collation(replaceOneModel.getReplaceOptions().getCollation()) .hint(toBsonDocument(replaceOneModel.getReplaceOptions().getHint())) @@ -512,7 +497,8 @@ MixedBulkWriteOperation bulkWrite(final List CommandReadOperation commandRead(final Bson command, final Class resultClass) { notNull("command", command); notNull("resultClass", resultClass); - return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(), assertNotNull(toBsonDocument(command)), - codecRegistry.get(resultClass)); + return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(), + assertNotNull(toBsonDocument(command)), codecRegistry.get(resultClass)); } DropDatabaseOperation dropDatabase() { - return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(), getWriteConcern()); + return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(), + getWriteConcern()); } - CreateCollectionOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { - CreateCollectionOperation operation = new CreateCollectionOperation(assertNotNull(namespace).getDatabaseName(), - collectionName, writeConcern) + CreateCollectionOperation operation = new CreateCollectionOperation( + 
assertNotNull(namespace).getDatabaseName(), collectionName, writeConcern) .collation(createCollectionOptions.getCollation()) .capped(createCollectionOptions.isCapped()) .sizeInBytes(createCollectionOptions.getSizeInBytes()) @@ -576,7 +562,8 @@ CreateCollectionOperation createCollection(final String collectionName, final Cr DropCollectionOperation dropCollection( final DropCollectionOptions dropCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { - DropCollectionOperation operation = new DropCollectionOperation(assertNotNull(namespace), writeConcern); + DropCollectionOperation operation = new DropCollectionOperation( + assertNotNull(namespace), writeConcern); Bson encryptedFields = dropCollectionOptions.getEncryptedFields(); if (encryptedFields != null) { operation.encryptedFields(assertNotNull(toBsonDocument(encryptedFields))); @@ -592,17 +579,17 @@ DropCollectionOperation dropCollection( RenameCollectionOperation renameCollection(final MongoNamespace newCollectionNamespace, - final RenameCollectionOptions renameCollectionOptions) { - return new RenameCollectionOperation(assertNotNull(namespace), newCollectionNamespace, writeConcern) - .dropTarget(renameCollectionOptions.isDropTarget()); + final RenameCollectionOptions renameCollectionOptions) { + return new RenameCollectionOperation(assertNotNull(namespace), + newCollectionNamespace, writeConcern).dropTarget(renameCollectionOptions.isDropTarget()); } CreateViewOperation createView(final String viewName, final String viewOn, final List pipeline, final CreateViewOptions createViewOptions) { notNull("options", createViewOptions); notNull("pipeline", pipeline); - return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName, viewOn, - assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation()); + return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName, + viewOn, assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation()); } CreateIndexesOperation createIndexes(final List indexes, final CreateIndexOptions createIndexOptions) { @@ -635,8 +622,8 @@ CreateIndexesOperation createIndexes(final List indexes, final Creat .hidden(model.getOptions().isHidden()) ); } - return new CreateIndexesOperation(assertNotNull(namespace), indexRequests, writeConcern) - .maxTime(createIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS) + return new CreateIndexesOperation( + assertNotNull(namespace), indexRequests, writeConcern) .commitQuorum(createIndexOptions.getCommitQuorum()); } @@ -644,14 +631,12 @@ CreateSearchIndexesOperation createSearchIndexes(final List in List indexRequests = indexes.stream() .map(this::createSearchIndexRequest) .collect(Collectors.toList()); - return new CreateSearchIndexesOperation(assertNotNull(namespace), indexRequests); } UpdateSearchIndexesOperation updateSearchIndex(final String indexName, final Bson definition) { BsonDocument definitionDocument = assertNotNull(toBsonDocument(definition)); SearchIndexRequest searchIndexRequest = new SearchIndexRequest(definitionDocument, indexName); - return new UpdateSearchIndexesOperation(assertNotNull(namespace), searchIndexRequest); } @@ -662,47 +647,39 @@ DropSearchIndexOperation dropSearchIndex(final String indexName) { ListSearchIndexesOperation listSearchIndexes(final Class resultClass, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - 
@Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse) { - - - return new ListSearchIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass), maxTimeMS, - indexName, batchSize, collation, comment, allowDiskUse, retryReads); + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { + return new ListSearchIndexesOperation<>(assertNotNull(namespace), + codecRegistry.get(resultClass), indexName, batchSize, collation, comment, allowDiskUse, retryReads); } - DropIndexOperation dropIndex(final String indexName, final DropIndexOptions dropIndexOptions) { - return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern) - .maxTime(dropIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS); + DropIndexOperation dropIndex(final String indexName, final DropIndexOptions ignoredOptions) { + return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern); } - DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions dropIndexOptions) { - return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern) - .maxTime(dropIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS); + DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions ignoredOptions) { + return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern); } ListCollectionsOperation listCollections(final String databaseName, final Class resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, - @Nullable final Integer batchSize, final long maxTimeMS, - final BsonValue comment) { + @Nullable final Integer batchSize, + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { return new ListCollectionsOperation<>(databaseName, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .nameOnly(collectionNamesOnly) .authorizedCollections(authorizedCollections) .batchSize(batchSize == null ? 0 : batchSize) - .maxTime(maxTimeMS, MILLISECONDS) - .comment(comment); + .comment(comment) + .timeoutMode(timeoutMode); } ListDatabasesOperation listDatabases(final Class resultClass, final Bson filter, - final Boolean nameOnly, final long maxTimeMS, + final Boolean nameOnly, final Boolean authorizedDatabasesOnly, final BsonValue comment) { - return new ListDatabasesOperation<>(codecRegistry.get(resultClass)).maxTime(maxTimeMS, MILLISECONDS) + return new ListDatabasesOperation<>(codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .nameOnly(nameOnly) @@ -711,25 +688,28 @@ ListDatabasesOperation listDatabases(final Class res } ListIndexesOperation listIndexes(final Class resultClass, @Nullable final Integer batchSize, - final long maxTimeMS, final BsonValue comment) { - return new ListIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass)) + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return new ListIndexesOperation<>(assertNotNull(namespace), + codecRegistry.get(resultClass)) .retryReads(retryReads) .batchSize(batchSize == null ? 
0 : batchSize) - .maxTime(maxTimeMS, MILLISECONDS) - .comment(comment); + .comment(comment) + .timeoutMode(timeoutMode); } ChangeStreamOperation changeStream(final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, - final Collation collation, final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, + final Collation collation, final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { - return new ChangeStreamOperation<>(assertNotNull(namespace), fullDocument, fullDocumentBeforeChange, + return new ChangeStreamOperation<>( + assertNotNull(namespace), + fullDocument, + fullDocumentBeforeChange, assertNotNull(toBsonDocumentList(pipeline)), decoder, changeStreamLevel) .batchSize(batchSize) .collation(collation) .comment(comment) - .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS) .resumeAfter(resumeToken) .startAtOperationTime(startAtOperationTime) .startAfter(startAfter) @@ -773,7 +753,6 @@ private SearchIndexRequest createSearchIndexRequest(final SearchIndexModel model BsonDocument definition = assertNotNull(toBsonDocument(model.getDefinition())); String indexName = model.getName(); - SearchIndexRequest indexRequest = new SearchIndexRequest(definition, indexName); - return indexRequest; + return new SearchIndexRequest(definition, indexName); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java index 14d61105d11..aa5d2e7d451 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java @@ -24,6 +24,7 @@ *

 * This class is not part of the public API and may be removed or changed at any time
      */ public interface ReadOperation { + /** * General execute which can return anything of type T * diff --git a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java index d6f7ee897ae..fd727f2fd81 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java @@ -53,12 +53,8 @@ public class RenameCollectionOperation implements AsyncWriteOperation, Wri private final WriteConcern writeConcern; private boolean dropTarget; - public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace) { - this(originalNamespace, newNamespace, null); - } - public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace, - @Nullable final WriteConcern writeConcern) { + @Nullable final WriteConcern writeConcern) { this.originalNamespace = notNull("originalNamespace", originalNamespace); this.newNamespace = notNull("newNamespace", newNamespace); this.writeConcern = writeConcern; @@ -79,7 +75,8 @@ public RenameCollectionOperation dropTarget(final boolean dropTarget) { @Override public Void execute(final WriteBinding binding) { - return withConnection(binding, connection -> executeCommand(binding, "admin", getCommand(), connection, writeConcernErrorTransformer())); + return withConnection(binding, connection -> executeCommand(binding, "admin", getCommand(), connection, + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext()))); } @Override @@ -90,7 +87,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall errHandlingCallback.onResult(null, t); } else { executeCommandAsync(binding, "admin", getCommand(), assertNotNull(connection), - writeConcernErrorTransformerAsync(), releasingCallback(errHandlingCallback, connection)); + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), + releasingCallback(errHandlingCallback, connection)); } }); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java index 5610f84dd36..43334109c20 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -18,6 +18,8 @@ import com.mongodb.MongoException; import com.mongodb.ReadPreference; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackBiFunction; @@ -32,6 +34,7 @@ import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -92,6 +95,8 @@ interface CommandWriteTransformer { R apply(T t, Connection connection); } + private static final BsonDocumentCodec BSON_DOCUMENT_CODEC = new BsonDocumentCodec(); + static T withReadConnectionSource(final ReadBinding binding, final CallableWithSource callable) { ConnectionSource source 
= binding.getReadConnectionSource(); try { @@ -172,7 +177,8 @@ static T executeRetryableRead( final Decoder decoder, final CommandReadTransformer transformer, final boolean retryReads) { - return executeRetryableRead(binding, binding::getReadConnectionSource, database, commandCreator, decoder, transformer, retryReads); + return executeRetryableRead(binding, binding::getReadConnectionSource, database, commandCreator, + decoder, transformer, retryReads); } static T executeRetryableRead( @@ -183,22 +189,38 @@ static T executeRetryableRead( final Decoder decoder, final CommandReadTransformer transformer, final boolean retryReads) { - RetryState retryState = CommandOperationHelper.initialRetryState(retryReads); + RetryState retryState = CommandOperationHelper.initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + Supplier read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> withSourceAndConnection(readConnectionSourceSupplier, false, (source, connection) -> { - retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); - return createReadCommandAndExecute(retryState, binding, source, database, commandCreator, decoder, transformer, connection); + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, database, + commandCreator, decoder, transformer, connection); }) ); return read.get(); } + @VisibleForTesting(otherwise = PRIVATE) + static T executeCommand(final WriteBinding binding, final String database, final CommandCreator commandCreator, + final CommandWriteTransformer transformer) { + return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) -> + transformer.apply(assertNotNull( + connection.command(database, + commandCreator.create(binding.getOperationContext(), + source.getServerDescription(), + connection.getDescription()), + new NoOpFieldNameValidator(), primary(), BSON_DOCUMENT_CODEC, binding.getOperationContext())), + connection)); + } + @VisibleForTesting(otherwise = PRIVATE) static T executeCommand(final WriteBinding binding, final String database, final BsonDocument command, final Decoder decoder, final CommandWriteTransformer transformer) { return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) -> transformer.apply(assertNotNull( - connection.command(database, command, new NoOpFieldNameValidator(), primary(), decoder, binding)), connection)); + connection.command(database, command, new NoOpFieldNameValidator(), primary(), decoder, + binding.getOperationContext())), connection)); } @Nullable @@ -206,7 +228,8 @@ static T executeCommand(final WriteBinding binding, final String database, f final Connection connection, final CommandWriteTransformer transformer) { notNull("binding", binding); return transformer.apply(assertNotNull( - connection.command(database, command, new NoOpFieldNameValidator(), primary(), new BsonDocumentCodec(), binding)), + connection.command(database, command, new NoOpFieldNameValidator(), primary(), BSON_DOCUMENT_CODEC, + binding.getOperationContext())), connection); } @@ -219,28 +242,30 @@ static R executeRetryableWrite( final CommandCreator commandCreator, final CommandWriteTransformer transformer, final com.mongodb.Function retryCommandModifier) { - RetryState retryState = CommandOperationHelper.initialRetryState(true); + 
RetryState retryState = CommandOperationHelper.initialRetryState(true, binding.getOperationContext().getTimeoutContext()); Supplier retryingWrite = decorateWriteWithRetries(retryState, binding.getOperationContext(), () -> { boolean firstAttempt = retryState.isFirstAttempt(); - if (!firstAttempt && binding.getSessionContext().hasActiveTransaction()) { - binding.getSessionContext().clearTransactionContext(); + SessionContext sessionContext = binding.getOperationContext().getSessionContext(); + if (!firstAttempt && sessionContext.hasActiveTransaction()) { + sessionContext.clearTransactionContext(); } return withSourceAndConnection(binding::getWriteConnectionSource, true, (source, connection) -> { int maxWireVersion = connection.getDescription().getMaxWireVersion(); try { - retryState.breakAndThrowIfRetryAnd(() -> !canRetryWrite(connection.getDescription(), binding.getSessionContext())); + retryState.breakAndThrowIfRetryAnd(() -> !canRetryWrite(connection.getDescription(), sessionContext)); BsonDocument command = retryState.attachment(AttachmentKeys.command()) .map(previousAttemptCommand -> { assertFalse(firstAttempt); return retryCommandModifier.apply(previousAttemptCommand); - }).orElseGet(() -> commandCreator.create(source.getServerDescription(), connection.getDescription())); + }).orElseGet(() -> commandCreator.create(binding.getOperationContext(), source.getServerDescription(), + connection.getDescription())); // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true) .attach(AttachmentKeys.retryableCommandFlag(), CommandOperationHelper.isRetryWritesEnabled(command), true) .attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false) .attach(AttachmentKeys.command(), command, false); return transformer.apply(assertNotNull(connection.command(database, command, fieldNameValidator, readPreference, - commandResultDecoder, binding)), + commandResultDecoder, binding.getOperationContext())), connection); } catch (MongoException e) { if (!firstAttempt) { @@ -260,17 +285,18 @@ static R executeRetryableWrite( @Nullable static T createReadCommandAndExecute( final RetryState retryState, - final ReadBinding binding, + final OperationContext operationContext, final ConnectionSource source, final String database, final CommandCreator commandCreator, final Decoder decoder, final CommandReadTransformer transformer, final Connection connection) { - BsonDocument command = commandCreator.create(source.getServerDescription(), connection.getDescription()); + BsonDocument command = commandCreator.create(operationContext, source.getServerDescription(), + connection.getDescription()); retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); return transformer.apply(assertNotNull(connection.command(database, command, new NoOpFieldNameValidator(), - source.getReadPreference(), decoder, binding)), source, connection); + source.getReadPreference(), decoder, operationContext)), source, connection); } @@ -293,11 +319,11 @@ static Supplier decorateReadWithRetries(final RetryState retryState, fina } - static CommandWriteTransformer writeConcernErrorTransformer() { + static CommandWriteTransformer writeConcernErrorTransformer(final TimeoutContext timeoutContext) { return (result, connection) -> { assertNotNull(result); throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), - 
connection.getDescription().getMaxWireVersion()); + connection.getDescription().getMaxWireVersion(), timeoutContext); return null; }; } @@ -308,9 +334,10 @@ static CommandReadTransformer> singleBatchCurso connection.getDescription().getServerAddress()); } - static BatchCursor cursorDocumentToBatchCursor(final BsonDocument cursorDocument, final Decoder decoder, - final BsonValue comment, final ConnectionSource source, final Connection connection, final int batchSize) { - return new CommandBatchCursor<>(cursorDocument, batchSize, 0, decoder, comment, source, connection); + static BatchCursor cursorDocumentToBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, + final int batchSize, final Decoder decoder, final BsonValue comment, final ConnectionSource source, + final Connection connection) { + return new CommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection); } private SyncOperationHelper() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java index d7134cd8ad0..73a83310d65 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java @@ -22,6 +22,7 @@ import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.BulkWriteOptions; import com.mongodb.client.model.Collation; import com.mongodb.client.model.CountOptions; @@ -45,6 +46,7 @@ import com.mongodb.client.model.WriteModel; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; @@ -58,27 +60,84 @@ import java.util.List; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + /** *

 * This class is not part of the public API and may be removed or changed at any time
      */ public final class SyncOperations { private final Operations operations; + private final TimeoutSettings timeoutSettings; public SyncOperations(final Class documentClass, final ReadPreference readPreference, - final CodecRegistry codecRegistry, final boolean retryReads) { - this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads); + final CodecRegistry codecRegistry, final boolean retryReads, final TimeoutSettings timeoutSettings) { + this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads, timeoutSettings); } public SyncOperations(final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, - final CodecRegistry codecRegistry, final boolean retryReads) { - this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads); + final CodecRegistry codecRegistry, final boolean retryReads, final TimeoutSettings timeoutSettings) { + this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads, timeoutSettings); } public SyncOperations(@Nullable final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, - final boolean retryWrites, final boolean retryReads) { - this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, + final boolean retryWrites, final boolean retryReads, final TimeoutSettings timeoutSettings) { + WriteConcern writeConcernToUse = writeConcern; + if (timeoutSettings.getTimeoutMS() != null) { + writeConcernToUse = assertNotNull(WriteConcernHelper.cloneWithoutTimeout(writeConcern)); + } + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcernToUse, retryWrites, retryReads); + this.timeoutSettings = timeoutSettings; + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime 
+ public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CreateIndexOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final DropIndexOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); } public ReadOperation countDocuments(final Bson filter, final CountOptions options) { @@ -95,7 +154,7 @@ public ReadOperation> findFirst(final Bson filter } public ExplainableReadOperation> find(final Bson filter, final Class resultClass, - final FindOptions options) { + final FindOptions options) { return operations.find(filter, resultClass, options); } @@ -105,30 +164,25 @@ public ReadOperation> find(final MongoNamespace f } public ReadOperation> distinct(final String fieldName, final Bson filter, - final Class resultClass, final long maxTimeMS, + final Class resultClass, final Collation collation, final BsonValue comment) { - return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment); + return operations.distinct(fieldName, filter, resultClass, collation, comment); } public ExplainableReadOperation> aggregate(final List pipeline, - final Class resultClass, - final long maxTimeMS, final long maxAwaitTimeMS, - @Nullable final Integer batchSize, - final Collation collation, final Bson hint, - final String hintString, - final BsonValue comment, - final Bson variables, - final Boolean allowDiskUse, - final AggregationLevel aggregationLevel) { - return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, batchSize, collation, hint, hintString, comment, - variables, allowDiskUse, aggregationLevel); - } - - public ReadOperation aggregateToCollection(final List pipeline, final long maxTimeMS, - final Boolean allowDiskUse, final Boolean bypassDocumentValidation, - final Collation collation, final Bson hint, final String hintString, final BsonValue comment, + final Class resultClass, + @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, + final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables, + final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { + return operations.aggregate(pipeline, resultClass, timeoutMode, batchSize, collation, hint, hintString, + comment, variables, allowDiskUse, aggregationLevel); + } + + public AggregateToCollectionOperation aggregateToCollection(final List pipeline, + @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation, + final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { - return operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, + return operations.aggregateToCollection(pipeline, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel); } @@ -136,21 +190,21 @@ public ReadOperation aggregateToCollection(final List pipe public WriteOperation mapReduceToCollection(final String databaseName, final String collectionName, final String 
mapFunction, final String reduceFunction, final String finalizeFunction, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final com.mongodb.client.model.MapReduceAction action, final Boolean bypassDocumentValidation, final Collation collation) { return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, limit, - maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); + jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); } public ReadOperation> mapReduce(final String mapFunction, final String reduceFunction, final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final Collation collation) { - return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, maxTimeMS, jsMode, scope, + return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, jsMode, scope, sort, verbose, collation); } @@ -225,7 +279,6 @@ public WriteOperation dropDatabase() { return operations.dropDatabase(); } - public WriteOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { return operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings); @@ -263,14 +316,9 @@ public WriteOperation dropSearchIndex(final String indexName) { public ExplainableReadOperation> listSearchIndexes(final Class resultClass, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse) { - return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, batchSize, collation, - comment, allowDiskUse); + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { + return operations.listSearchIndexes(resultClass, indexName, batchSize, collation, comment, allowDiskUse); } public WriteOperation dropIndex(final String indexName, final DropIndexOptions options) { @@ -284,29 +332,30 @@ public WriteOperation dropIndex(final Bson keys, final DropIndexOptions op public ReadOperation> listCollections(final String databaseName, final Class resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, - @Nullable final Integer batchSize, final long maxTimeMS, - final BsonValue comment) { + @Nullable final Integer batchSize, + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, - batchSize, maxTimeMS, comment); + batchSize, comment, timeoutMode); + } public ReadOperation> listDatabases(final Class resultClass, final Bson filter, - final Boolean nameOnly, final long maxTimeMS, + final Boolean nameOnly, final Boolean authorizedDatabases, final BsonValue comment) { - return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabases, 
comment); + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabases, comment); } public ReadOperation> listIndexes(final Class resultClass, @Nullable final Integer batchSize, - final long maxTimeMS, final BsonValue comment) { - return operations.listIndexes(resultClass, batchSize, maxTimeMS, comment); + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return operations.listIndexes(resultClass, batchSize, comment, timeoutMode); } public ReadOperation> changeStream(final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, final Collation collation, - final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, + final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize, - collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java index 499623ebcce..3bb04efa8ed 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java @@ -18,6 +18,7 @@ import com.mongodb.Function; import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -54,21 +55,25 @@ public WriteConcern getWriteConcern() { @Override public Void execute(final WriteBinding binding) { - isTrue("in transaction", binding.getSessionContext().hasActiveTransaction()); + isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); return executeRetryableWrite(binding, "admin", null, new NoOpFieldNameValidator(), - new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformer(), getRetryCommandModifier()); + new BsonDocumentCodec(), getCommandCreator(), + writeConcernErrorTransformer(timeoutContext), getRetryCommandModifier(timeoutContext)); } @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { - isTrue("in transaction", binding.getSessionContext().hasActiveTransaction()); + isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); executeRetryableWriteAsync(binding, "admin", null, new NoOpFieldNameValidator(), - new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformerAsync(), getRetryCommandModifier(), - errorHandlingCallback(callback, LOGGER)); + new BsonDocumentCodec(), getCommandCreator(), + writeConcernErrorTransformerAsync(timeoutContext), getRetryCommandModifier(timeoutContext), + errorHandlingCallback(callback, LOGGER)); } 
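The TransactionOperation hunks here follow the same pattern as the SyncOperations changes above: legacy per-operation maxTime values are folded into TimeoutSettings up front, and each operation reads its TimeoutContext from the binding's OperationContext at execution time. As a rough sketch of the resulting call-site shape (illustrative only: syncOperations and a Bson filter are assumed to be in scope, imports and the surrounding executor plumbing are elided):

    // Sketch: the deprecated per-operation maxTime is translated into TimeoutSettings
    // via the new createTimeoutSettings overload; the operation no longer takes maxTimeMS.
    CountOptions options = new CountOptions().maxTime(5, TimeUnit.SECONDS);
    TimeoutSettings operationTimeoutSettings = syncOperations.createTimeoutSettings(options);
    ReadOperation<Long> countOperation = syncOperations.countDocuments(filter, options);
    // operationTimeoutSettings would then feed the TimeoutContext of the OperationContext
    // that countOperation.execute(binding) observes, as in the hunks above.
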
CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> { + return (operationContext, serverDescription, connectionDescription) -> { BsonDocument command = new BsonDocument(getCommandName(), new BsonInt32(1)); if (!writeConcern.isServerDefault()) { command.put("writeConcern", writeConcern.asDocument()); @@ -84,5 +89,5 @@ CommandCreator getCommandCreator() { */ protected abstract String getCommandName(); - protected abstract Function getRetryCommandModifier(); + protected abstract Function getRetryCommandModifier(TimeoutContext timeoutContext); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java b/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java index a9e1a1e8ee6..10b02eda4fe 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java @@ -22,11 +22,14 @@ import com.mongodb.WriteConcern; import com.mongodb.WriteConcernResult; import com.mongodb.bulk.WriteConcernError; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.connection.ProtocolHelper; +import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonString; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel; @@ -41,10 +44,26 @@ public static void appendWriteConcernToCommand(final WriteConcern writeConcern, commandDocument.put("writeConcern", writeConcern.asDocument()); } } + @Nullable + public static WriteConcern cloneWithoutTimeout(@Nullable final WriteConcern writeConcern) { + if (writeConcern == null || writeConcern.getWTimeout(TimeUnit.MILLISECONDS) == null) { + return writeConcern; + } + + WriteConcern mapped; + Object w = writeConcern.getWObject(); + if (w == null) { + mapped = WriteConcern.ACKNOWLEDGED; + } else { + mapped = w instanceof Integer ? new WriteConcern((Integer) w) : new WriteConcern((String) w); + } + return mapped.withJournal(writeConcern.getJournal()); + } - public static void throwOnWriteConcernError(final BsonDocument result, final ServerAddress serverAddress, final int maxWireVersion) { + public static void throwOnWriteConcernError(final BsonDocument result, final ServerAddress serverAddress, + final int maxWireVersion, final TimeoutContext timeoutContext) { if (hasWriteConcernError(result)) { - MongoException exception = ProtocolHelper.createSpecialException(result, serverAddress, "errmsg"); + MongoException exception = ProtocolHelper.createSpecialException(result, serverAddress, "errmsg", timeoutContext); if (exception == null) { exception = createWriteConcernException(result, serverAddress); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java index a2e34985179..1a4fee36e1c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java @@ -24,6 +24,7 @@ *

 * This class is not part of the public API and may be removed or changed at any time
      */ public interface WriteOperation { + /** * General execute which can return anything of type T * diff --git a/driver-core/src/main/com/mongodb/internal/package-info.java b/driver-core/src/main/com/mongodb/internal/package-info.java index 2f7f9b396cf..e7825fe1292 100644 --- a/driver-core/src/main/com/mongodb/internal/package-info.java +++ b/driver-core/src/main/com/mongodb/internal/package-info.java @@ -15,7 +15,6 @@ */ /** - * This package contains classes that manage binding to MongoDB servers for various operations. */ @NonNullApi diff --git a/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java b/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java index ca2023b4d3d..80f88cc08f5 100644 --- a/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java +++ b/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java @@ -19,6 +19,10 @@ import com.mongodb.ClientSessionOptions; import com.mongodb.MongoClientException; import com.mongodb.ServerAddress; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.lang.Nullable; import com.mongodb.session.ClientSession; @@ -26,10 +30,12 @@ import org.bson.BsonDocument; import org.bson.BsonTimestamp; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.isTrue; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

 * This class is not part of the public API and may be removed or changed at any time

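The hunk that follows gives each session an optional TimeoutContext and derives the timeout settings used for a transaction. The resolution order it implements is: an explicit transaction timeout wins, then the session's default timeout, then the client-wide timeoutMS. A standalone sketch of that rule (plain Java with illustrative names, not driver code):

    // Mirrors getTimeoutSettings(...) in the hunk below: the first non-null value wins.
    static Long resolveTimeoutMS(Long transactionTimeoutMS, Long sessionDefaultTimeoutMS, Long clientTimeoutMS) {
        if (transactionTimeoutMS != null) {
            return transactionTimeoutMS;      // TransactionOptions.getTimeout(MILLISECONDS)
        }
        if (sessionDefaultTimeoutMS != null) {
            return sessionDefaultTimeoutMS;   // ClientSessionOptions.getDefaultTimeout(MILLISECONDS)
        }
        return clientTimeoutMS;               // client-level TimeoutSettings.getTimeoutMS()
    }
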
      @@ -48,6 +54,16 @@ public class BaseClientSessionImpl implements ClientSession { private ServerAddress pinnedServerAddress; private BsonDocument recoveryToken; private ReferenceCounted transactionContext; + @Nullable + private TimeoutContext timeoutContext; + + protected static boolean hasTimeoutMS(@Nullable final TimeoutContext timeoutContext) { + return timeoutContext != null && timeoutContext.hasTimeoutMS(); + } + + protected static boolean hasWTimeoutMS(@Nullable final WriteConcern writeConcern) { + return writeConcern != null && writeConcern.getWTimeout(TimeUnit.MILLISECONDS) != null; + } public BaseClientSessionImpl(final ServerSessionPool serverSessionPool, final Object originator, final ClientSessionOptions options) { this.serverSessionPool = serverSessionPool; @@ -193,4 +209,37 @@ public void close() { clearTransactionContext(); } } + + @Override + @Nullable + public TimeoutContext getTimeoutContext() { + return timeoutContext; + } + + protected void setTimeoutContext(@Nullable final TimeoutContext timeoutContext) { + this.timeoutContext = timeoutContext; + } + + protected void resetTimeout() { + if (timeoutContext != null) { + timeoutContext.resetTimeoutIfPresent(); + } + } + + protected TimeoutSettings getTimeoutSettings(final TransactionOptions transactionOptions, final TimeoutSettings timeoutSettings) { + Long transactionTimeoutMS = transactionOptions.getTimeout(MILLISECONDS); + Long defaultTimeoutMS = getOptions().getDefaultTimeout(MILLISECONDS); + Long clientTimeoutMS = timeoutSettings.getTimeoutMS(); + + Long timeoutMS = transactionTimeoutMS != null ? transactionTimeoutMS + : defaultTimeoutMS != null ? defaultTimeoutMS : clientTimeoutMS; + + return timeoutSettings + .withMaxCommitMS(transactionOptions.getMaxCommitTime(MILLISECONDS)) + .withTimeout(timeoutMS, MILLISECONDS); + } + + protected enum TransactionState { + NONE, IN, COMMITTED, ABORTED + } } diff --git a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java index 35268e68f13..6f118f0eddb 100644 --- a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java +++ b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java @@ -22,7 +22,8 @@ import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.IgnorableRequestContext; -import com.mongodb.internal.binding.StaticBindingContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.NoOpSessionContext; @@ -59,21 +60,26 @@ public class ServerSessionPool { private final Cluster cluster; private final ServerSessionPool.Clock clock; private volatile boolean closed; - @Nullable - private final ServerApi serverApi; + private final OperationContext operationContext; private final LongAdder inUseCount = new LongAdder(); interface Clock { long millis(); } - public ServerSessionPool(final Cluster cluster, @Nullable final ServerApi serverApi) { - this(cluster, serverApi, System::currentTimeMillis); + public ServerSessionPool(final Cluster cluster, final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) { + this(cluster, + new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(timeoutSettings.connectionOnly()), serverApi)); } - public 
ServerSessionPool(final Cluster cluster, @Nullable final ServerApi serverApi, final Clock clock) { + public ServerSessionPool(final Cluster cluster, final OperationContext operationContext) { + this(cluster, operationContext, System::currentTimeMillis); + } + + public ServerSessionPool(final Cluster cluster, final OperationContext operationContext, final Clock clock) { this.cluster = cluster; - this.serverApi = serverApi; + this.operationContext = operationContext; this.clock = clock; } @@ -128,8 +134,6 @@ private void endClosedSessions() { Connection connection = null; try { - StaticBindingContext context = new StaticBindingContext(NoOpSessionContext.INSTANCE, serverApi, - IgnorableRequestContext.INSTANCE, new OperationContext()); connection = cluster.selectServer( new ServerSelector() { @Override @@ -149,11 +153,11 @@ public String toString() { + '}'; } }, - context.getOperationContext()).getServer().getConnection(context.getOperationContext()); + operationContext).getServer().getConnection(operationContext); connection.command("admin", new BsonDocument("endSessions", new BsonArray(identifiers)), new NoOpFieldNameValidator(), - ReadPreference.primaryPreferred(), new BsonDocumentCodec(), context); + ReadPreference.primaryPreferred(), new BsonDocumentCodec(), operationContext); } catch (MongoException e) { // ignore exceptions } finally { diff --git a/driver-core/src/main/com/mongodb/internal/time/StartTime.java b/driver-core/src/main/com/mongodb/internal/time/StartTime.java new file mode 100644 index 00000000000..905af2265d9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/time/StartTime.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.time; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +/** + * A point in time used to track how much time has elapsed. In contrast to a + * Timeout, it is guaranteed to not be in the future, and is never infinite. + * + * @see TimePoint + */ +public interface StartTime { + + /** + * @see TimePoint#elapsed() + */ + Duration elapsed(); + + /** + * @see TimePoint#asTimeout() + */ + Timeout asTimeout(); + + /** + * Returns an {@linkplain Timeout#infinite() infinite} timeout if + * {@code timeoutValue} is negative, an expired timeout if + * {@code timeoutValue} is 0, otherwise a timeout in {@code durationNanos}. + *

+ * Note that some code might ignore a timeout, and attempt to perform
+ * the operation in question at least once.
+ *
+ * Note that the contract of this method is also used in some places to
+ * specify the behavior of methods that accept {@code (long timeout, TimeUnit unit)},
+ * e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)},
+ * so it cannot be changed without updating those methods.
      + * + * @see TimePoint#timeoutAfterOrInfiniteIfNegative(long, TimeUnit) + */ + Timeout timeoutAfterOrInfiniteIfNegative(long timeoutValue, TimeUnit timeUnit); + + /** + * @return a StartPoint, as of now + */ + static StartTime now() { + return TimePoint.at(System.nanoTime()); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java index 78859802150..102dfb2d609 100644 --- a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java +++ b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java @@ -17,74 +17,183 @@ import com.mongodb.annotations.Immutable; import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.function.CheckedFunction; +import com.mongodb.internal.function.CheckedSupplier; +import com.mongodb.lang.Nullable; import java.time.Clock; import java.time.Duration; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static java.util.concurrent.TimeUnit.NANOSECONDS; /** * A
value-based class
- * representing a point on a timeline. The origin of this timeline has no known relation to the
- * {@linkplain Clock#systemUTC() system clock}. The same timeline is used by all {@link TimePoint}s within the same process.
+ * representing a point on a timeline. The origin of this timeline (which is not
+ * exposed) has no relation to the {@linkplain Clock#systemUTC() system clock}.
+ * The same timeline is used by all {@link TimePoint}s within the same process.
 *

 * Methods operating on a pair of {@link TimePoint}s,
 * for example, {@link #durationSince(TimePoint)}, {@link #compareTo(TimePoint)},
 * or producing a point from another one, for example, {@link #add(Duration)},
- * work correctly only if the duration between the points is not greater than {@link Long#MAX_VALUE} nanoseconds,
- * which is more than 292 years.
+ * work correctly only if the duration between the points is not greater than
+ * {@link Long#MAX_VALUE} nanoseconds, which is more than 292 years.
 *
 * This class is not part of the public API and may be removed or changed at any time.
      */ @Immutable -public final class TimePoint implements Comparable { - private final long nanos; +class TimePoint implements Comparable, StartTime, Timeout { + @Nullable + private final Long nanos; - private TimePoint(final long nanos) { + TimePoint(@Nullable final Long nanos) { this.nanos = nanos; } + @VisibleForTesting(otherwise = PRIVATE) + static TimePoint at(@Nullable final Long nanos) { + return new TimePoint(nanos); + } + + @VisibleForTesting(otherwise = PRIVATE) + long currentNanos() { + return System.nanoTime(); + } + /** * Returns the current {@link TimePoint}. */ - public static TimePoint now() { + static TimePoint now() { return at(System.nanoTime()); } - @VisibleForTesting(otherwise = PRIVATE) - static TimePoint at(final long nanos) { - return new TimePoint(nanos); + /** + * Returns a {@link TimePoint} infinitely far in the future. + */ + static TimePoint infinite() { + return at(null); + } + + @Override + public Timeout shortenBy(final long amount, final TimeUnit timeUnit) { + if (isInfinite()) { + return this; // shortening (lengthening) an infinite timeout does nothing + } + long durationNanos = NANOSECONDS.convert(amount, timeUnit); + return TimePoint.at(assertNotNull(nanos) - durationNanos); + } + + @Override + public T checkedCall(final TimeUnit timeUnit, + final CheckedSupplier onInfinite, final CheckedFunction onHasRemaining, + final CheckedSupplier onExpired) throws E { + if (this.isInfinite()) { + return onInfinite.get(); + } + long remaining = remaining(timeUnit); + if (remaining <= 0) { + return onExpired.get(); + } else { + return onHasRemaining.apply(remaining); + } } /** - * The {@link Duration} between this {@link TimePoint} and {@code t}. - * A {@linkplain Duration#isNegative() negative} {@link Duration} means that - * this {@link TimePoint} is {@linkplain #compareTo(TimePoint) before} {@code t}. + * @return true if this timepoint is infinite. + */ + private boolean isInfinite() { + return nanos == null; + } + + /** + * @return this TimePoint, as a Timeout. Convenience for {@link StartTime} + */ + @Override + public Timeout asTimeout() { + return this; + } + + /** + * The number of whole time units that remain until this TimePoint + * has expired. This should not be used to check for expiry, + * but can be used to supply a remaining value, in the finest-grained + * TimeUnit available, to some method that may time out. + * This method must not be used with infinite TimePoints. * - * @see #elapsed() + * @param unit the time unit + * @return the remaining time + * @throws AssertionError if the timeout is infinite. Always check if the + * timeout {@link #isInfinite()} before calling. */ - public Duration durationSince(final TimePoint t) { - return Duration.ofNanos(nanos - t.nanos); + private long remaining(final TimeUnit unit) { + if (isInfinite()) { + throw new AssertionError("Infinite TimePoints have infinite remaining time"); + } + long remaining = assertNotNull(nanos) - currentNanos(); + remaining = unit.convert(remaining, NANOSECONDS); + return remaining <= 0 ? 0 : remaining; } /** * The {@link Duration} between {@link TimePoint#now()} and this {@link TimePoint}. * This method is functionally equivalent to {@code TimePoint.now().durationSince(this)}. + * Note that the duration will represent fully-elapsed whole units. * + * @throws AssertionError If this TimePoint is {@linkplain #isInfinite() infinite}. 
* @see #durationSince(TimePoint) */ public Duration elapsed() { - return Duration.ofNanos(System.nanoTime() - nanos); + if (isInfinite()) { + throw new AssertionError("No time can elapse since an infinite TimePoint"); + } + return Duration.ofNanos(currentNanos() - assertNotNull(nanos)); } + /** + * The {@link Duration} between this {@link TimePoint} and {@code t}. + * A {@linkplain Duration#isNegative() negative} {@link Duration} means that + * this {@link TimePoint} is {@linkplain #compareTo(TimePoint) before} {@code t}. + * + * @see #elapsed() + */ + Duration durationSince(final TimePoint t) { + if (this.isInfinite()) { + throw new AssertionError("this timepoint is infinite, with no duration since"); + } + if (t.isInfinite()) { + throw new AssertionError("the other timepoint is infinite, with no duration until"); + } + return Duration.ofNanos(nanos - assertNotNull(t.nanos)); + } + + /** + * @param timeoutValue value; if negative, the result is infinite + * @param timeUnit timeUnit + * @return a TimePoint that is the given number of timeUnits in the future + */ + @Override + public TimePoint timeoutAfterOrInfiniteIfNegative(final long timeoutValue, final TimeUnit timeUnit) { + if (timeoutValue < 0) { + return infinite(); + } + return this.add(Duration.ofNanos(NANOSECONDS.convert(timeoutValue, timeUnit))); + } + + /** * Returns a {@link TimePoint} that is {@code duration} away from this one. * * @param duration A duration that may also be {@linkplain Duration#isNegative() negative}. */ - public TimePoint add(final Duration duration) { + TimePoint add(final Duration duration) { + if (isInfinite()) { + throw new AssertionError("No time can be added to an infinite TimePoint"); + } long durationNanos = duration.toNanos(); - return TimePoint.at(nanos + durationNanos); + return TimePoint.at(assertNotNull(nanos) + durationNanos); } /** @@ -94,7 +203,14 @@ public TimePoint add(final Duration duration) { */ @Override public int compareTo(final TimePoint t) { - return Long.signum(nanos - t.nanos); + if (Objects.equals(nanos, t.nanos)) { + return 0; + } else if (this.isInfinite()) { + return 1; + } else if (t.isInfinite()) { + return -1; + } + return Long.signum(nanos - assertNotNull(t.nanos)); } @Override @@ -106,18 +222,22 @@ public boolean equals(final Object o) { return false; } final TimePoint timePoint = (TimePoint) o; - return nanos == timePoint.nanos; + return Objects.equals(nanos, timePoint.nanos); } @Override public int hashCode() { - return Long.hashCode(nanos); + return Objects.hash(nanos); } @Override public String toString() { + String remainingMs = isInfinite() + ? 
"infinite" + : "" + TimeUnit.MILLISECONDS.convert(currentNanos() - assertNotNull(nanos), NANOSECONDS); return "TimePoint{" + "nanos=" + nanos + + "remainingMs=" + remainingMs + '}'; } } diff --git a/driver-core/src/main/com/mongodb/internal/time/Timeout.java b/driver-core/src/main/com/mongodb/internal/time/Timeout.java index f0d4bbf3ea1..85b92d9fde1 100644 --- a/driver-core/src/main/com/mongodb/internal/time/Timeout.java +++ b/driver-core/src/main/com/mongodb/internal/time/Timeout.java @@ -15,245 +15,229 @@ */ package com.mongodb.internal.time; -import com.mongodb.annotations.Immutable; -import com.mongodb.internal.VisibleForTesting; +import com.mongodb.MongoInterruptedException; +import com.mongodb.assertions.Assertions; +import com.mongodb.internal.function.CheckedConsumer; +import com.mongodb.internal.function.CheckedFunction; +import com.mongodb.internal.function.CheckedRunnable; +import com.mongodb.internal.function.CheckedSupplier; import com.mongodb.lang.Nullable; +import org.jetbrains.annotations.NotNull; -import java.util.Objects; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.function.LongConsumer; +import java.util.function.LongFunction; +import java.util.function.Supplier; -import static com.mongodb.assertions.Assertions.assertFalse; -import static com.mongodb.assertions.Assertions.assertNotNull; -import static com.mongodb.assertions.Assertions.assertTrue; -import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; -import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static java.util.concurrent.TimeUnit.NANOSECONDS; /** - * A value-based class - * for tracking timeouts. - *

      - * This class is not part of the public API and may be removed or changed at any time.

      + * A Timeout is a "deadline", point in time by which something must happen. + * + * @see TimePoint */ -@Immutable -public final class Timeout { - private static final Timeout INFINITE = new Timeout(-1, null); - private static final Timeout IMMEDIATE = new Timeout(0, null); - - private final long durationNanos; - /** - * {@code null} iff {@code this} is {@linkplain #isInfinite() infinite} or {@linkplain #isImmediate() immediate}. - */ - @Nullable - private final TimePoint start; - - private Timeout(final long durationNanos, @Nullable final TimePoint start) { - this.durationNanos = durationNanos; - this.start = start; - } - - /** - * Converts the specified {@code duration} from {@code unit}s to {@link TimeUnit#NANOSECONDS} - * as specified by {@link TimeUnit#toNanos(long)} and then acts identically to {@link #started(long, TimePoint)}. - *

- * Note that the contract of this method is also used in some places to specify the behavior of methods that accept
- * {@code (long timeout, TimeUnit unit)}, e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)},
- * so it cannot be changed without updating those methods.
- */
- public static Timeout started(final long duration, final TimeUnit unit, final TimePoint at) {
- return started(unit.toNanos(duration), assertNotNull(at));
- }
- /**
- * Returns an {@linkplain #isInfinite() infinite} timeout if {@code durationNanos} is either negative
- * or is equal to {@link Long#MAX_VALUE},
- * an {@linkplain #isImmediate() immediate} timeout if {@code durationNanos} is 0,
- * otherwise a timeout of {@code durationNanos}.
- *
- * Note that the contract of this method is also used in some places to specify the behavior of methods that accept
- * {@code (long timeout, TimeUnit unit)}, e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)},
- * so it cannot be changed without updating those methods.
      - */ - public static Timeout started(final long durationNanos, final TimePoint at) { - if (durationNanos < 0 || durationNanos == Long.MAX_VALUE) { - return infinite(); - } else if (durationNanos == 0) { - return immediate(); - } else { - return new Timeout(durationNanos, assertNotNull(at)); - } - } - +public interface Timeout { /** - * This method acts identically to {@link #started(long, TimeUnit, TimePoint)} - * with the {@linkplain TimePoint#now() current} {@link TimePoint} passed to it. + * @param timeouts the timeouts + * @return the instance of the timeout that expires earliest */ - public static Timeout startNow(final long duration, final TimeUnit unit) { - return started(duration, unit, TimePoint.now()); + static Timeout earliest(final Timeout... timeouts) { + List list = Arrays.asList(timeouts); + list.forEach(v -> { + if (!(v instanceof TimePoint)) { + throw new AssertionError("Only TimePoints may be compared"); + } + }); + return Collections.min(list, (a, b) -> { + TimePoint tpa = (TimePoint) a; + TimePoint tpb = (TimePoint) b; + return tpa.compareTo(tpb); + }); } /** - * This method acts identically to {@link #started(long, TimePoint)} - * with the {@linkplain TimePoint#now() current} {@link TimePoint} passed to it. + * @return an infinite (non-expiring) timeout */ - public static Timeout startNow(final long durationNanos) { - return started(durationNanos, TimePoint.now()); + static Timeout infinite() { + return TimePoint.infinite(); } /** - * @see #started(long, TimePoint) + * @param timeout the timeout + * @return the provided timeout, or an infinite timeout if provided null. */ - public static Timeout infinite() { - return INFINITE; + static Timeout nullAsInfinite(@Nullable final Timeout timeout) { + return timeout == null ? infinite() : timeout; } /** - * @see #started(long, TimePoint) + * @param duration the non-negative duration, in the specified time unit + * @param unit the time unit + * @param zeroSemantics what to interpret a 0 duration as (infinite or expired) + * @return a timeout that expires in the specified duration after now. */ - public static Timeout immediate() { - return IMMEDIATE; - } - - /** - * Returns 0 or a positive value. - * 0 means that the timeout has expired. - * - * @throws AssertionError If the timeout is {@linkplain #isInfinite() infinite} or {@linkplain #isImmediate() immediate}. - */ - @VisibleForTesting(otherwise = PRIVATE) - long remainingNanos(final TimePoint now) { - return Math.max(0, durationNanos - now.durationSince(assertNotNull(start)).toNanos()); + @NotNull + static Timeout expiresIn(final long duration, final TimeUnit unit, final ZeroSemantics zeroSemantics) { + if (duration < 0) { + throw new AssertionError("Timeouts must not be in the past"); + } else if (duration == 0) { + switch (zeroSemantics) { + case ZERO_DURATION_MEANS_INFINITE: + return Timeout.infinite(); + case ZERO_DURATION_MEANS_EXPIRED: + return TimePoint.now(); + default: + throw Assertions.fail("Unknown enum value"); + } + } else { + // duration will never be negative + return TimePoint.now().timeoutAfterOrInfiniteIfNegative(duration, unit); + } } /** - * Returns 0 or a positive value converted to the specified {@code unit}s. - * Use {@link #expired(long)} to check if the returned value signifies that a timeout is expired. 
- * - * @param unit If not {@link TimeUnit#NANOSECONDS}, then coarsening conversion is done that may result in returning a value - * that represents a longer time duration than is actually remaining (this is done to prevent treating a timeout as - * {@linkplain #expired(long) expired} when it is not). Consequently, one should specify {@code unit} as small as - * practically possible. Such rounding up happens if and only if the remaining time cannot be - * represented exactly as an integral number of the {@code unit}s specified. It may result in - * {@link #expired()} returning {@code true} and after that (in the happens-before order) - * {@link #expired(long) expired}{@code (}{@link #remaining(TimeUnit) remaining(...)}{@code )} - * returning {@code false}. If such a discrepancy is observed, - * the result of the {@link #expired()} method should be preferred. + * This timeout, shortened by the provided amount (it will expire sooner). * - * @throws AssertionError If the timeout is {@linkplain #isInfinite() infinite}. - * @see #remainingOrInfinite(TimeUnit) - */ - public long remaining(final TimeUnit unit) { - assertFalse(isInfinite()); - return isImmediate() ? 0 : convertRoundUp(remainingNanos(TimePoint.now()), unit); + * @param amount the amount to shorten by + * @param timeUnit the time unit of the amount + * @return the shortened timeout + */ + Timeout shortenBy(long amount, TimeUnit timeUnit); + + /** + * {@linkplain Condition#awaitNanos(long) Awaits} on the provided + * condition. Will {@linkplain Condition#await() await} without a waiting + * time if this timeout is infinite. + * {@linkplain #onExistsAndExpired(Timeout, Runnable) Expiry} is not + * checked by this method, and should be called outside of this method. + * @param condition the condition. + * @param action supplies the name of the action, for {@link MongoInterruptedException} + */ + default void awaitOn(final Condition condition, final Supplier action) { + try { + // ignore result, the timeout will track this remaining time + //noinspection ResultOfMethodCallIgnored + checkedRun(NANOSECONDS, + () -> condition.await(), + (ns) -> condition.awaitNanos(ns), + () -> condition.awaitNanos(0)); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted while " + action.get(), e); + } } /** - * Returns a negative value for {@linkplain #isInfinite() infinite} timeouts, - * otherwise behaves identically to {@link #remaining(TimeUnit)}. - * Use {@link #expired(long)} to check if the returned value signifies that a timeout is expired. - * - * @see #remaining(TimeUnit) - */ - public long remainingOrInfinite(final TimeUnit unit) { - return isInfinite() ? -1 : remaining(unit); + * {@linkplain CountDownLatch#await(long, TimeUnit) Awaits} on the provided + * condition. Will {@linkplain CountDownLatch#await() await} without a waiting + * time if this timeout is infinite. + * {@linkplain #onExistsAndExpired(Timeout, Runnable) Expiry} is not + * checked by this method, and should be called outside of this method. + * @param latch the latch. 
+ * @param action supplies the name of the action, for {@link MongoInterruptedException} + */ + default void awaitOn(final CountDownLatch latch, final Supplier action) { + try { + // ignore result, the timeout will track this remaining time + //noinspection ResultOfMethodCallIgnored + checkedRun(NANOSECONDS, + () -> latch.await(), + (ns) -> latch.await(ns, NANOSECONDS), + () -> latch.await(0, NANOSECONDS)); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted while " + action.get(), e); + } } /** - * @see #expired(long) + * Call one of 3 possible branches depending on the state of the timeout, + * and return the result. + * @param timeUnit the positive (non-zero) remaining time to provide to the + * {@code onHasRemaining} branch. The underlying nano time + * is rounded down to the given time unit. If 0, the timeout + * is considered expired. + * @param onInfinite branch to take when the timeout is infinite + * @param onHasRemaining branch to take when there is positive remaining + * time in the specified time unit + * @param onExpired branch to take when the timeout is expired + * @return the result provided by the branch + * @param the type of the result */ - public boolean expired() { - return expired(remainingOrInfinite(NANOSECONDS)); + default T call(final TimeUnit timeUnit, + final Supplier onInfinite, final LongFunction onHasRemaining, + final Supplier onExpired) { + return checkedCall(timeUnit, onInfinite::get, onHasRemaining::apply, onExpired::get); } /** - * Returns {@code true} if and only if the {@code remaining} time is 0 (the time unit is irrelevant). - * - * @see #remaining(TimeUnit) - * @see #remainingOrInfinite(TimeUnit) - * @see #expired() + * Call, but throwing a checked exception. + * @see #call(TimeUnit, Supplier, LongFunction, Supplier) + * @param the checked exception type + * @throws E the checked exception */ - public static boolean expired(final long remaining) { - return remaining == 0; - } + T checkedCall(TimeUnit timeUnit, + CheckedSupplier onInfinite, CheckedFunction onHasRemaining, + CheckedSupplier onExpired) throws E; /** - * @return {@code true} if and only if the timeout duration is considered to be infinite. + * Run one of 3 possible branches depending on the state of the timeout. + * @see #call(TimeUnit, Supplier, LongFunction, Supplier) */ - public boolean isInfinite() { - return equals(INFINITE); + default void run(final TimeUnit timeUnit, + final Runnable onInfinite, final LongConsumer onHasRemaining, + final Runnable onExpired) { + this.call(timeUnit, () -> { + onInfinite.run(); + return null; + }, (t) -> { + onHasRemaining.accept(t); + return null; + }, () -> { + onExpired.run(); + return null; + }); } /** - * @return {@code true} if and only if the timeout duration is 0. + * Run, but throwing a checked exception. 
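+     * <p>
+     * A minimal sketch of the three-branch style shared with {@link #run(TimeUnit, Runnable, LongConsumer, Runnable)};
+     * the {@code doWork} and {@code abortWork} callables are illustrative placeholders only:
+     * <pre>{@code
+     *  Timeout timeout = Timeout.expiresIn(500, MILLISECONDS, ZeroSemantics.ZERO_DURATION_MEANS_INFINITE);
+     *  timeout.run(MILLISECONDS,
+     *          () -> doWork(),       // infinite: no deadline to enforce
+     *          (ms) -> doWork(ms),   // positive remaining time, in milliseconds
+     *          () -> abortWork());   // expired
+     * }</pre>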
+ * @see #checkedCall(TimeUnit, CheckedSupplier, CheckedFunction, CheckedSupplier) */ - public boolean isImmediate() { - return equals(IMMEDIATE); + default void checkedRun(final TimeUnit timeUnit, + final CheckedRunnable onInfinite, final CheckedConsumer onHasRemaining, + final CheckedRunnable onExpired) throws E { + this.checkedCall(timeUnit, () -> { + onInfinite.run(); + return null; + }, (t) -> { + onHasRemaining.accept(t); + return null; + }, () -> { + onExpired.run(); + return null; + }); } - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Timeout other = (Timeout) o; - return durationNanos == other.durationNanos && Objects.equals(start, other.start()); - } - - @Override - public int hashCode() { - return Objects.hash(durationNanos, start); + default void onExpired(final Runnable onExpired) { + onExistsAndExpired(this, onExpired); } - /** - * This method is useful for debugging. - * - * @see #toUserString() - */ - @Override - public String toString() { - return "Timeout{" - + "durationNanos=" + durationNanos - + ", start=" + start - + '}'; - } - - /** - * Returns a user-friendly representation. Examples: 1500 ms, infinite, 0 ms (immediate). - * - * @see #toString() - */ - public String toUserString() { - if (isInfinite()) { - return "infinite"; - } else if (isImmediate()) { - return "0 ms (immediate)"; - } else { - return convertRoundUp(durationNanos, MILLISECONDS) + " ms"; + static void onExistsAndExpired(@Nullable final Timeout t, final Runnable onExpired) { + if (t == null) { + return; } + t.run(NANOSECONDS, + () -> {}, + (ns) -> {}, + () -> onExpired.run()); } - @VisibleForTesting(otherwise = PRIVATE) - long durationNanos() { - return durationNanos; - } - - @VisibleForTesting(otherwise = PRIVATE) - @Nullable - TimePoint start() { - return start; - } - - @VisibleForTesting(otherwise = PRIVATE) - static long convertRoundUp(final long nonNegativeNanos, final TimeUnit unit) { - assertTrue(nonNegativeNanos >= 0); - if (unit == NANOSECONDS) { - return nonNegativeNanos; - } else { - long trimmed = unit.convert(nonNegativeNanos, NANOSECONDS); - return NANOSECONDS.convert(trimmed, unit) < nonNegativeNanos ? trimmed + 1 : trimmed; - } + enum ZeroSemantics { + ZERO_DURATION_MEANS_EXPIRED, + ZERO_DURATION_MEANS_INFINITE } } diff --git a/driver-core/src/main/com/mongodb/session/ClientSession.java b/driver-core/src/main/com/mongodb/session/ClientSession.java index c6f4c8dcb60..072e6d90905 100644 --- a/driver-core/src/main/com/mongodb/session/ClientSession.java +++ b/driver-core/src/main/com/mongodb/session/ClientSession.java @@ -19,6 +19,7 @@ import com.mongodb.ClientSessionOptions; import com.mongodb.ServerAddress; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.TimeoutContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonTimestamp; @@ -168,4 +169,18 @@ public interface ClientSession extends Closeable { @Override void close(); + + /** + * Gets the timeout context to use with this session: + * + *
+     * <ul>
+     *  <li>{@code MongoClientSettings#getTimeoutMS}</li>
+     *  <li>{@code ClientSessionOptions#getDefaultTimeout}</li>
+     * </ul>
+     *
+     * <p>For internal use only</p>
+     *
      + * @return the timeout to use + * @since 5.2 + */ + @Nullable + TimeoutContext getTimeoutContext(); } diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java index 920a2c2ac09..a889856f394 100644 --- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java +++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java @@ -30,16 +30,20 @@ import com.mongodb.connection.SslSettings; import com.mongodb.connection.TransportSettings; import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncClusterBinding; import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.AsyncOperationContextBinding; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.AsyncReadWriteBinding; import com.mongodb.internal.binding.AsyncSessionBinding; import com.mongodb.internal.binding.AsyncSingleConnectionBinding; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.ClusterBinding; +import com.mongodb.internal.binding.OperationContextBinding; import com.mongodb.internal.binding.ReadWriteBinding; import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.internal.binding.SessionBinding; @@ -50,7 +54,10 @@ import com.mongodb.internal.connection.DefaultClusterFactory; import com.mongodb.internal.connection.DefaultInetAddressResolver; import com.mongodb.internal.connection.InternalConnectionPoolSettings; +import com.mongodb.internal.connection.InternalOperationContextFactory; import com.mongodb.internal.connection.MongoCredentialWithCache; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; import com.mongodb.internal.connection.SocketStreamFactory; import com.mongodb.internal.connection.StreamFactory; import com.mongodb.internal.connection.StreamFactoryFactory; @@ -94,9 +101,10 @@ import static com.mongodb.internal.connection.ClusterDescriptionHelper.getSecondaries; import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static java.lang.String.format; -import static java.lang.Thread.sleep; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assume.assumeThat; @@ -118,7 +126,20 @@ public final class ClusterFixture { private static final String DEFAULT_DATABASE_NAME = "JavaDriverTest"; private static final int COMMAND_NOT_FOUND_ERROR_CODE = 59; public static final long TIMEOUT = 60L; - public static final Duration TIMEOUT_DURATION = Duration.ofMinutes(1); + public static final Duration TIMEOUT_DURATION = Duration.ofSeconds(TIMEOUT); + + public static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(5)); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_TIMEOUT = TIMEOUT_SETTINGS.withTimeout(TIMEOUT, SECONDS); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT = 
TIMEOUT_SETTINGS.withTimeout(0L, MILLISECONDS); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_TIME = TIMEOUT_SETTINGS.withMaxTimeMS(100); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME = TIMEOUT_SETTINGS.withMaxAwaitTimeMS(101); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME = + TIMEOUT_SETTINGS.withMaxTimeAndMaxAwaitTimeMS(101, 1001); + + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS = + TIMEOUT_SETTINGS.withMaxTimeAndMaxAwaitTimeMS(101, 1001).withMaxCommitMS(999L); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_COMMIT = TIMEOUT_SETTINGS.withMaxCommitMS(999L); + public static final String LEGACY_HELLO = "isMaster"; private static ConnectionString connectionString; @@ -164,12 +185,28 @@ public static ServerVersion getServerVersion() { if (serverVersion == null) { serverVersion = getVersion(new CommandReadOperation<>("admin", new BsonDocument("buildInfo", new BsonInt32(1)), new BsonDocumentCodec()) - .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE))); + .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT))); } return serverVersion; } + public static final OperationContext OPERATION_CONTEXT = new OperationContext( + IgnorableRequestContext.INSTANCE, + new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT), + new TimeoutContext(TIMEOUT_SETTINGS), + getServerApi()); + + public static final InternalOperationContextFactory OPERATION_CONTEXT_FACTORY = + new InternalOperationContextFactory(TIMEOUT_SETTINGS, getServerApi()); + + public static OperationContext createOperationContext(final TimeoutSettings timeoutSettings) { + return new OperationContext( + IgnorableRequestContext.INSTANCE, + new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT), + new TimeoutContext(timeoutSettings), + getServerApi()); + } + private static ServerVersion getVersion(final BsonDocument buildInfoResult) { List versionArray = buildInfoResult.getArray("versionArray").subList(0, 3); @@ -208,7 +245,8 @@ public static boolean hasEncryptionTestsEnabled() { } public static Document getServerStatus() { - return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)), new DocumentCodec()) + return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)), + new DocumentCodec()) .execute(getBinding()); } @@ -272,8 +310,8 @@ public static synchronized ConnectionString getConnectionString() { new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), SslSettings.builder().build())); try { BsonDocument helloResult = new CommandReadOperation<>("admin", - new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()).execute(new ClusterBinding(cluster, - ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE)); + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()) + .execute(new ClusterBinding(cluster, ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT)); if (helloResult.containsKey("setName")) { connectionString = new ConnectionString(DEFAULT_URI + "/?replicaSet=" + helloResult.getString("setName").getValue()); @@ -316,29 +354,49 @@ private static ConnectionString getConnectionStringFromSystemProperty(final Stri return null; } + public static ReadWriteBinding getBinding() { 
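+        // binds the shared test cluster with the default OPERATION_CONTEXT (primary reads, ReadConcern.DEFAULT)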
+ return getBinding(getCluster()); + } + public static ReadWriteBinding getBinding(final Cluster cluster) { - return new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE); + return new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT); } - public static ReadWriteBinding getBinding() { - return getBinding(getCluster(), ReadPreference.primary()); + public static ReadWriteBinding getBinding(final TimeoutSettings timeoutSettings) { + return getBinding(getCluster(), ReadPreference.primary(), createNewOperationContext(timeoutSettings)); + } + + public static ReadWriteBinding getBinding(final OperationContext operationContext) { + return getBinding(getCluster(), ReadPreference.primary(), operationContext); } public static ReadWriteBinding getBinding(final ReadPreference readPreference) { - return getBinding(getCluster(), readPreference); + return getBinding(getCluster(), readPreference, OPERATION_CONTEXT); + } + + public static OperationContext createNewOperationContext(final TimeoutSettings timeoutSettings) { + return new OperationContext(OPERATION_CONTEXT.getId(), + OPERATION_CONTEXT.getRequestContext(), + OPERATION_CONTEXT.getSessionContext(), + new TimeoutContext(timeoutSettings), + OPERATION_CONTEXT.getServerApi()); } - private static ReadWriteBinding getBinding(final Cluster cluster, final ReadPreference readPreference) { + private static ReadWriteBinding getBinding(final Cluster cluster, + final ReadPreference readPreference, + final OperationContext operationContext) { if (!BINDING_MAP.containsKey(readPreference)) { - ReadWriteBinding binding = new SessionBinding(new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE)); + ReadWriteBinding binding = new SessionBinding(new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, + operationContext)); BINDING_MAP.put(readPreference, binding); } - return BINDING_MAP.get(readPreference); + ReadWriteBinding readWriteBinding = BINDING_MAP.get(readPreference); + return new OperationContextBinding(readWriteBinding, + operationContext.withSessionContext(readWriteBinding.getOperationContext().getSessionContext())); } public static SingleConnectionBinding getSingleConnectionBinding() { - return new SingleConnectionBinding(getCluster(), ReadPreference.primary(), getServerApi()); + return new SingleConnectionBinding(getCluster(), ReadPreference.primary(), OPERATION_CONTEXT); } public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding() { @@ -346,29 +404,41 @@ public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding() { } public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding(final Cluster cluster) { - return new AsyncSingleConnectionBinding(cluster, 20, SECONDS, getServerApi()); + return new AsyncSingleConnectionBinding(cluster, ReadPreference.primary(), OPERATION_CONTEXT); } public static AsyncReadWriteBinding getAsyncBinding(final Cluster cluster) { - return new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE); + return new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT); } public static AsyncReadWriteBinding getAsyncBinding() { - return getAsyncBinding(getAsyncCluster(), ReadPreference.primary()); + return getAsyncBinding(getAsyncCluster(), ReadPreference.primary(), OPERATION_CONTEXT); + } + + 
public static AsyncReadWriteBinding getAsyncBinding(final TimeoutSettings timeoutSettings) { + return getAsyncBinding(createNewOperationContext(timeoutSettings)); + } + + public static AsyncReadWriteBinding getAsyncBinding(final OperationContext operationContext) { + return getAsyncBinding(getAsyncCluster(), ReadPreference.primary(), operationContext); } public static AsyncReadWriteBinding getAsyncBinding(final ReadPreference readPreference) { - return getAsyncBinding(getAsyncCluster(), readPreference); + return getAsyncBinding(getAsyncCluster(), readPreference, OPERATION_CONTEXT); } - public static AsyncReadWriteBinding getAsyncBinding(final Cluster cluster, final ReadPreference readPreference) { + public static AsyncReadWriteBinding getAsyncBinding( + final Cluster cluster, + final ReadPreference readPreference, + final OperationContext operationContext) { if (!ASYNC_BINDING_MAP.containsKey(readPreference)) { AsyncReadWriteBinding binding = new AsyncSessionBinding(new AsyncClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, - getServerApi(), IgnorableRequestContext.INSTANCE)); + operationContext)); ASYNC_BINDING_MAP.put(readPreference, binding); } - return ASYNC_BINDING_MAP.get(readPreference); + AsyncReadWriteBinding readWriteBinding = ASYNC_BINDING_MAP.get(readPreference); + return new AsyncOperationContextBinding(readWriteBinding, + operationContext.withSessionContext(readWriteBinding.getOperationContext().getSessionContext())); } public static synchronized Cluster getCluster() { @@ -402,16 +472,17 @@ private static Cluster createCluster(final MongoCredential credential, final Str return new DefaultClusterFactory().createCluster(ClusterSettings.builder().hosts(asList(getPrimary())).build(), ServerSettings.builder().build(), ConnectionPoolSettings.builder().maxSize(1).build(), InternalConnectionPoolSettings.builder().build(), - streamFactory, streamFactory, credential, LoggerSettings.builder().build(), null, null, null, - Collections.emptyList(), getServerApi(), null); + TIMEOUT_SETTINGS.connectionOnly(), streamFactory, TIMEOUT_SETTINGS.connectionOnly(), streamFactory, credential, + LoggerSettings.builder().build(), null, null, null, Collections.emptyList(), getServerApi(), null); } private static Cluster createCluster(final ConnectionString connectionString, final StreamFactory streamFactory) { - return new DefaultClusterFactory().createCluster(ClusterSettings.builder().applyConnectionString(connectionString).build(), - ServerSettings.builder().build(), - ConnectionPoolSettings.builder().applyConnectionString(connectionString).build(), - InternalConnectionPoolSettings.builder().build(), - streamFactory, + MongoClientSettings mongoClientSettings = MongoClientSettings.builder().applyConnectionString(connectionString).build(); + + return new DefaultClusterFactory().createCluster(mongoClientSettings.getClusterSettings(), + mongoClientSettings.getServerSettings(), mongoClientSettings.getConnectionPoolSettings(), + InternalConnectionPoolSettings.builder().build(), TimeoutSettings.create(mongoClientSettings).connectionOnly(), + streamFactory, TimeoutSettings.createHeartbeatSettings(mongoClientSettings).connectionOnly(), new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().readTimeout(5, SECONDS).build(), getSslSettings(connectionString)), connectionString.getCredential(), @@ -475,32 +546,40 @@ public static SslSettings getSslSettings(final ConnectionString connectionString return SslSettings.builder().applyConnectionString(connectionString).build(); } - 
public static ServerAddress getPrimary() { + public static ServerDescription getPrimaryServerDescription() { List serverDescriptions = getPrimaries(getClusterDescription(getCluster())); while (serverDescriptions.isEmpty()) { - try { - sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + sleep(100); serverDescriptions = getPrimaries(getClusterDescription(getCluster())); } - return serverDescriptions.get(0).getAddress(); + return serverDescriptions.get(0); + } + + public static ServerAddress getPrimary() { + return getPrimaryServerDescription().getAddress(); + } + + public static long getPrimaryRTT() { + return MILLISECONDS.convert(getPrimaryServerDescription().getRoundTripTimeNanos(), NANOSECONDS); } public static ServerAddress getSecondary() { List serverDescriptions = getSecondaries(getClusterDescription(getCluster())); while (serverDescriptions.isEmpty()) { - try { - sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + sleep(100); serverDescriptions = getSecondaries(getClusterDescription(getCluster())); } return serverDescriptions.get(0).getAddress(); } + public static void sleep(final int sleepMS) { + try { + Thread.sleep(sleepMS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + @Nullable public static MongoCredential getCredential() { return getConnectionString().getCredential(); @@ -518,8 +597,7 @@ public static MongoCredentialWithCache getCredentialWithCache() { public static BsonDocument getServerParameters() { if (serverParameters == null) { serverParameters = new CommandReadOperation<>("admin", - new BsonDocument("getParameter", new BsonString("*")), - new BsonDocumentCodec()) + new BsonDocument("getParameter", new BsonString("*")), new BsonDocumentCodec()) .execute(getBinding()); } return serverParameters; @@ -599,7 +677,8 @@ public static void disableFailPoint(final String failPoint) { BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString(failPoint)) .append("mode", new BsonString("off")); try { - new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()).execute(getBinding()); + new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()) + .execute(getBinding()); } catch (MongoCommandException e) { // ignore } @@ -743,7 +822,7 @@ public static int getReferenceCountAfterTimeout(final ReferenceCounted reference if (System.currentTimeMillis() > startTime + TIMEOUT_DURATION.toMillis()) { return count; } - sleep(10); + Thread.sleep(10); count = referenceCounted.getCount(); } catch (InterruptedException e) { throw interruptAndCreateMongoInterruptedException("Interrupted", e); @@ -755,4 +834,11 @@ public static int getReferenceCountAfterTimeout(final ReferenceCounted reference public static ClusterSettings.Builder setDirectConnection(final ClusterSettings.Builder builder) { return builder.mode(ClusterConnectionMode.SINGLE).hosts(singletonList(getPrimary())); } + + public static int applyTimeoutMultiplierForServerless(final int timeoutMs) { + if (ClusterFixture.isServerlessTest()) { + return timeoutMs * 2; + } + return timeoutMs; + } } diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy index 372fdd4b82d..adf707b9cb7 100644 --- a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy +++ 
b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy @@ -45,7 +45,6 @@ import com.mongodb.internal.binding.WriteBinding import com.mongodb.internal.bulk.InsertRequest import com.mongodb.internal.connection.AsyncConnection import com.mongodb.internal.connection.Connection -import com.mongodb.internal.connection.OperationContext import com.mongodb.internal.connection.ServerHelper import com.mongodb.internal.connection.SplittablePayload import com.mongodb.internal.operation.AsyncReadOperation @@ -64,6 +63,7 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.TIMEOUT import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget import static com.mongodb.ClusterFixture.executeAsync @@ -109,13 +109,14 @@ class OperationFunctionalSpecification extends Specification { } void acknowledgeWrite(final SingleConnectionBinding binding) { - new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false).execute(binding) + new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, + ACKNOWLEDGED, false).execute(binding) binding.release() } void acknowledgeWrite(final AsyncSingleConnectionBinding binding) { - executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false), - binding) + executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], + true, ACKNOWLEDGED, false), binding) binding.release() } @@ -142,7 +143,9 @@ class OperationFunctionalSpecification extends Specification { def executeWithSession(operation, boolean async) { def executor = async ? ClusterFixture.&executeAsync : ClusterFixture.&executeSync - def binding = async ? new AsyncSessionBinding(getAsyncBinding()) : new SessionBinding(getBinding()) + def binding = async ? 
+ new AsyncSessionBinding(getAsyncBinding()) + : new SessionBinding(getBinding()) executor(operation, binding) } @@ -270,7 +273,11 @@ class OperationFunctionalSpecification extends Specification { BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary(), Boolean retryable = false, ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { - def operationContext = new OperationContext() + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> activeTransaction + getReadConcern() >> readConcern + }) def connection = Mock(Connection) { _ * getDescription() >> Stub(ConnectionDescription) { getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) @@ -283,7 +290,6 @@ class OperationFunctionalSpecification extends Specification { connection } getOperationContext() >> operationContext - getServerApi() >> null getReadPreference() >> readPreference getServerDescription() >> { def builder = ServerDescription.builder().address(Stub(ServerAddress)).state(ServerConnectionState.CONNECTED) @@ -296,23 +302,11 @@ class OperationFunctionalSpecification extends Specification { def readBinding = Stub(ReadBinding) { getReadConnectionSource(*_) >> connectionSource getReadPreference() >> readPreference - getServerApi() >> null getOperationContext() >> operationContext - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> activeTransaction - getReadConcern() >> readConcern - } } def writeBinding = Stub(WriteBinding) { getWriteConnectionSource() >> connectionSource - getServerApi() >> null getOperationContext() >> operationContext - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> activeTransaction - getReadConcern() >> readConcern - } } if (retryable) { @@ -356,7 +350,11 @@ class OperationFunctionalSpecification extends Specification { Boolean checkCommand = true, BsonDocument expectedCommand = null, Boolean checkSecondaryOk = false, ReadPreference readPreference = ReadPreference.primary(), Boolean retryable = false, ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { - def operationContext = new OperationContext() + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> activeTransaction + getReadConcern() >> readConcern + }) def connection = Mock(AsyncConnection) { _ * getDescription() >> Stub(ConnectionDescription) { getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) @@ -367,7 +365,6 @@ class OperationFunctionalSpecification extends Specification { def connectionSource = Stub(AsyncConnectionSource) { getConnection(_) >> { it[0].onResult(connection, null) } getReadPreference() >> readPreference - getServerApi() >> null getOperationContext() >> operationContext getServerDescription() >> { def builder = ServerDescription.builder().address(Stub(ServerAddress)).state(ServerConnectionState.CONNECTED) @@ -380,23 +377,11 @@ class OperationFunctionalSpecification extends Specification { def readBinding = Stub(AsyncReadBinding) { getReadConnectionSource(*_) >> { it.last().onResult(connectionSource, null) } getReadPreference() >> readPreference - getServerApi() >> null getOperationContext() >> operationContext - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> activeTransaction - getReadConcern() >> readConcern - 
} } def writeBinding = Stub(AsyncWriteBinding) { getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } - getServerApi() >> null getOperationContext() >> operationContext - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> activeTransaction - getReadConcern() >> readConcern - } } def callback = new FutureResultCallback() @@ -458,6 +443,13 @@ class OperationFunctionalSpecification extends Specification { } } + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + def connectionSource = Stub(ConnectionSource) { getConnection() >> { if (serverVersions.isEmpty()){ @@ -466,16 +458,11 @@ class OperationFunctionalSpecification extends Specification { connection } } - getServerApi() >> null + getOperationContext() >> operationContext } def writeBinding = Stub(WriteBinding) { getWriteConnectionSource() >> connectionSource - getServerApi() >> null - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> false - getReadConcern() >> ReadConcern.DEFAULT - } + getOperationContext() >> operationContext } 1 * connection.command(*_) >> { @@ -499,8 +486,14 @@ class OperationFunctionalSpecification extends Specification { } } + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + def connectionSource = Stub(AsyncConnectionSource) { - getServerApi() >> null getConnection(_) >> { if (serverVersions.isEmpty()) { it[0].onResult(null, @@ -509,16 +502,12 @@ class OperationFunctionalSpecification extends Specification { it[0].onResult(connection, null) } } + getOperationContext() >> operationContext } def writeBinding = Stub(AsyncWriteBinding) { - getServerApi() >> null getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> false - getReadConcern() >> ReadConcern.DEFAULT - } + getOperationContext() >> operationContext } def callback = new FutureResultCallback() diff --git a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java index 4c045001b10..23be2ccc3ab 100644 --- a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java +++ b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java @@ -29,6 +29,7 @@ import org.bson.BsonInt32; import org.bson.BsonInt64; import org.bson.BsonString; +import org.bson.BsonType; import org.bson.BsonValue; import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.BsonValueCodecProvider; @@ -117,11 +118,11 @@ static boolean isWriteCommand(final String commandName) { return asList("insert", "update", "delete").contains(commandName); } - public static void assertEventsEquality(final List expectedEvents, final List events) { + public static void assertEventsEquality(final List expectedEvents, final List events) { assertEventsEquality(expectedEvents, events, null); } - public static void assertEventsEquality(final List expectedEvents, final List events, + public static void assertEventsEquality(final List expectedEvents, final List events, @Nullable final Map lsidMap) { assertEquals(expectedEvents.size(), events.size()); 
@@ -221,25 +222,33 @@ private static CommandSucceededEvent massageActualCommandSucceededEvent(final Co private static CommandStartedEvent massageActualCommandStartedEvent(final CommandStartedEvent event, @Nullable final Map lsidMap, final CommandStartedEvent expectedCommandStartedEvent) { - BsonDocument command = getWritableCloneOfCommand(event.getCommand()); + BsonDocument actualCommand = getWritableCloneOfCommand(event.getCommand()); + BsonDocument expectedCommand = expectedCommandStartedEvent.getCommand(); - massageCommand(event, command); + massageCommand(event, actualCommand); - if (command.containsKey("readConcern") && (command.getDocument("readConcern").containsKey("afterClusterTime"))) { - command.getDocument("readConcern").put("afterClusterTime", new BsonInt32(42)); + if (actualCommand.containsKey("readConcern") && (actualCommand.getDocument("readConcern").containsKey("afterClusterTime"))) { + actualCommand.getDocument("readConcern").put("afterClusterTime", new BsonInt32(42)); } - // Tests expect maxTimeMS to be int32, but Java API requires maxTime to be a long. This massage seems preferable to casting - if (command.containsKey("maxTimeMS")) { - command.put("maxTimeMS", new BsonInt32(command.getNumber("maxTimeMS").intValue())); + if (actualCommand.containsKey("maxTimeMS") && !isExpectedMaxTimeMsLong(expectedCommand)) { + // Some tests expect maxTimeMS to be int32, but Java API requires maxTime to be a long. This massage seems preferable to casting + actualCommand.put("maxTimeMS", new BsonInt32(actualCommand.getNumber("maxTimeMS").intValue())); } // Tests do not expect the "ns" field in a result after running createIndex. - if (command.containsKey("createIndexes") && command.containsKey("indexes")) { - massageCommandIndexes(command.getArray("indexes")); + if (actualCommand.containsKey("createIndexes") && actualCommand.containsKey("indexes")) { + massageCommandIndexes(actualCommand.getArray("indexes")); } - massageActualCommand(command, expectedCommandStartedEvent.getCommand()); + massageActualCommand(actualCommand, expectedCommand); return new CommandStartedEvent(event.getRequestContext(), event.getOperationId(), event.getRequestId(), - event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), command); + event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), actualCommand); + } + + private static boolean isExpectedMaxTimeMsLong(final BsonDocument expectedCommand) { + if (expectedCommand.containsKey("maxTimeMS")) { + return expectedCommand.get("maxTimeMS").getBsonType() == BsonType.INT64; + } + return false; } private static void massageCommandIndexes(final BsonArray indexes) { diff --git a/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java index 8ebb1204ba3..119babf8875 100644 --- a/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java +++ b/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java @@ -21,7 +21,10 @@ import org.bson.BsonType; import org.bson.BsonValue; -import static org.junit.Assert.assertEquals; +import java.util.List; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; public final class CrudTestHelper { @@ -32,15 +35,12 @@ public static void replaceTypeAssertionWithActual(final BsonDocument expected, f BsonDocument valueDocument = value.asDocument(); BsonValue actualValue = actual.get(key); if (valueDocument.size() == 1 && 
valueDocument.getFirstKey().equals("$$type")) { - String type = valueDocument.getString("$$type").getValue(); - if (type.equals("binData")) { - assertEquals(BsonType.BINARY, actualValue.getBsonType()); - expected.put(key, actualValue); - } else if (type.equals("long")) { - assertEquals(BsonType.INT64, actualValue.getBsonType()); + List types = getExpectedTypes(valueDocument.get("$$type")); + String actualType = asTypeString(actualValue.getBsonType()); + if (types.contains(actualType)) { expected.put(key, actualValue); } else { - throw new UnsupportedOperationException("Unsupported type: " + type); + throw new UnsupportedOperationException("Unsupported type: " + actualValue); } } else if (actualValue != null && actualValue.isDocument()) { replaceTypeAssertionWithActual(valueDocument, actualValue.asDocument()); @@ -53,6 +53,31 @@ public static void replaceTypeAssertionWithActual(final BsonDocument expected, f } } + private static String asTypeString(final BsonType bsonType) { + switch (bsonType) { + case BINARY: + return "binData"; + case INT32: + return "int"; + case INT64: + return "long"; + default: + throw new UnsupportedOperationException("Unsupported bson type conversion to string: " + bsonType); + } + } + + private static List getExpectedTypes(final BsonValue expectedTypes) { + List types; + if (expectedTypes.isString()) { + types = singletonList(expectedTypes.asString().getValue()); + } else if (expectedTypes.isArray()) { + types = expectedTypes.asArray().stream().map(type -> type.asString().getValue()).collect(Collectors.toList()); + } else { + throw new UnsupportedOperationException("Unsupported type for $$type value"); + } + return types; + } + private static void replaceTypeAssertionWithActual(final BsonArray expected, final BsonArray actual) { for (int i = 0; i < expected.size(); i++) { BsonValue value = expected.get(i); @@ -63,6 +88,7 @@ private static void replaceTypeAssertionWithActual(final BsonArray expected, fin } } } + private CrudTestHelper() { } diff --git a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java index 01ed641e4b1..1cc3904749d 100644 --- a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java +++ b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java @@ -17,9 +17,9 @@ import com.mongodb.ReadPreference; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.SplittablePayload; import org.bson.BsonDocument; import org.bson.FieldNameValidator; @@ -56,19 +56,19 @@ public ConnectionDescription getDescription() { @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context) { + final OperationContext operationContext) { SupplyingCallback callback = new SupplyingCallback<>(); - wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, context, callback); + wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, callback); return callback.get(); } @Override public T command(final 
String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context, final boolean responseExpected, final SplittablePayload payload, + final OperationContext operationContext, final boolean responseExpected, final SplittablePayload payload, final FieldNameValidator payloadFieldNameValidator) { SupplyingCallback callback = new SupplyingCallback<>(); - wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, context, + wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext, responseExpected, payload, payloadFieldNameValidator, callback); return callback.get(); } diff --git a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java index 9e17843d9fe..e297726d325 100644 --- a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java +++ b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java @@ -85,7 +85,8 @@ public CollectionHelper(final Codec codec, final MongoNamespace namespace) { } public T hello() { - return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec).execute(getBinding()); + return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec) + .execute(getBinding()); } public static void drop(final MongoNamespace namespace) { @@ -160,9 +161,27 @@ public void create(final String collectionName, final CreateCollectionOptions op create(collectionName, options, WriteConcern.ACKNOWLEDGED); } + public void create(final WriteConcern writeConcern, final BsonDocument createOptions) { + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions(); + for (String option : createOptions.keySet()) { + switch (option) { + case "capped": + createCollectionOptions.capped(createOptions.getBoolean("capped").getValue()); + break; + case "size": + createCollectionOptions.sizeInBytes(createOptions.getNumber("size").longValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported create collection option: " + option); + } + } + create(namespace.getCollectionName(), createCollectionOptions, writeConcern); + } + public void create(final String collectionName, final CreateCollectionOptions options, final WriteConcern writeConcern) { drop(namespace, writeConcern); - CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName, writeConcern) + CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName, + writeConcern) .capped(options.isCapped()) .sizeInBytes(options.getSizeInBytes()) .maxDocuments(options.getMaxDocuments()); @@ -217,6 +236,10 @@ public void insertDocuments(final BsonDocument... documents) { insertDocuments(asList(documents)); } + public void insertDocuments(final WriteConcern writeConcern, final BsonDocument... 
documents) { + insertDocuments(asList(documents), writeConcern); + } + public void insertDocuments(final List documents) { insertDocuments(documents, getBinding()); } @@ -301,18 +324,18 @@ public void updateOne(final Bson filter, final Bson update, final boolean isUpse public void replaceOne(final Bson filter, final Bson update, final boolean isUpsert) { new MixedBulkWriteOperation(namespace, - singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), + singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), update.toBsonDocument(Document.class, registry), WriteRequest.Type.REPLACE) .upsert(isUpsert)), - true, WriteConcern.ACKNOWLEDGED, false) + true, WriteConcern.ACKNOWLEDGED, false) .execute(getBinding()); } public void deleteOne(final Bson filter) { new MixedBulkWriteOperation(namespace, - singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry))), - true, WriteConcern.ACKNOWLEDGED, false) + singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry))), + true, WriteConcern.ACKNOWLEDGED, false) .execute(getBinding()); } @@ -333,11 +356,11 @@ public List aggregateDb(final List pipeline) { } private List aggregate(final List pipeline, final Decoder decoder, final AggregationLevel level) { - List bsonDocumentPipeline = new ArrayList(); + List bsonDocumentPipeline = new ArrayList<>(); for (Bson cur : pipeline) { bsonDocumentPipeline.add(cur.toBsonDocument(Document.class, registry)); } - BatchCursor cursor = new AggregateOperation(namespace, bsonDocumentPipeline, decoder, level) + BatchCursor cursor = new AggregateOperation<>(namespace, bsonDocumentPipeline, decoder, level) .execute(getBinding()); List results = new ArrayList<>(); while (cursor.hasNext()) { @@ -372,8 +395,8 @@ public List find(final BsonDocument filter, final BsonDocument sort, fina } public List find(final BsonDocument filter, final BsonDocument sort, final BsonDocument projection, final Decoder decoder) { - BatchCursor cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort).projection(projection) - .execute(getBinding()); + BatchCursor cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort) + .projection(projection).execute(getBinding()); List results = new ArrayList<>(); while (cursor.hasNext()) { results.addAll(cursor.next()); @@ -394,7 +417,8 @@ public long count(final AsyncReadWriteBinding binding) throws Throwable { } public long count(final Bson filter) { - return new CountDocumentsOperation(namespace).filter(toBsonDocument(filter)).execute(getBinding()); + return new CountDocumentsOperation(namespace) + .filter(toBsonDocument(filter)).execute(getBinding()); } public BsonDocument wrap(final Document document) { @@ -406,31 +430,35 @@ public BsonDocument toBsonDocument(final Bson document) { } public void createIndex(final BsonDocument key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); } public void createIndex(final Document key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); } public void createUniqueIndex(final Document key) { - new CreateIndexesOperation(namespace, 
asList(new IndexRequest(wrap(key)).unique(true)), WriteConcern.ACKNOWLEDGED) + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key)).unique(true)), + WriteConcern.ACKNOWLEDGED) .execute(getBinding()); } public void createIndex(final Document key, final String defaultLanguage) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), - WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(namespace, + singletonList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); } public void createIndex(final Bson key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(key.toBsonDocument(Document.class, registry))), - WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(namespace, + singletonList(new IndexRequest(key.toBsonDocument(Document.class, registry))), WriteConcern.ACKNOWLEDGED).execute(getBinding()); } public List listIndexes(){ List indexes = new ArrayList<>(); - BatchCursor cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec()).execute(getBinding()); + BatchCursor cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec()) + .execute(getBinding()); while (cursor.hasNext()) { indexes.addAll(cursor.next()); } @@ -439,8 +467,8 @@ public List listIndexes(){ public static void killAllSessions() { try { - new CommandReadOperation<>("admin", new BsonDocument("killAllSessions", new BsonArray()), - new BsonDocumentCodec()).execute(getBinding()); + new CommandReadOperation<>("admin", + new BsonDocument("killAllSessions", new BsonArray()), new BsonDocumentCodec()).execute(getBinding()); } catch (MongoCommandException e) { // ignore exception caused by killing the implicit session that the killAllSessions command itself is running in } @@ -449,9 +477,8 @@ public static void killAllSessions() { public void renameCollection(final MongoNamespace newNamespace) { try { new CommandReadOperation<>("admin", - new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName())) - .append("to", new BsonString(newNamespace.getFullName())), - new BsonDocumentCodec()).execute(getBinding()); + new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName())) + .append("to", new BsonString(newNamespace.getFullName())), new BsonDocumentCodec()).execute(getBinding()); } catch (MongoCommandException e) { // do nothing } @@ -462,10 +489,12 @@ public void runAdminCommand(final String command) { } public void runAdminCommand(final BsonDocument command) { - new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(getBinding()); + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) + .execute(getBinding()); } public void runAdminCommand(final BsonDocument command, final ReadPreference readPreference) { - new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(getBinding(readPreference)); + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) + .execute(getBinding(readPreference)); } } diff --git a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy index d75d6ef489e..b3da89231e7 100644 --- a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy 
@@ -66,6 +66,6 @@ class ConnectionSpecification extends OperationFunctionalSpecification { } private static BsonDocument getHelloResult() { new CommandReadOperation('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), - new BsonDocumentCodec()).execute(getBinding()) + new BsonDocumentCodec()).execute(getBinding()) } } diff --git a/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy index 74dad9221c0..012ba23e339 100644 --- a/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy @@ -18,6 +18,7 @@ import util.spock.annotations.Slow import java.util.concurrent.CountDownLatch import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getSslSettings class NettyStreamSpecification extends Specification { @@ -42,7 +43,7 @@ class NettyStreamSpecification extends Specification { def stream = factory.create(new ServerAddress()) when: - stream.open() + stream.open(OPERATION_CONTEXT) then: !stream.isClosed() @@ -68,7 +69,7 @@ class NettyStreamSpecification extends Specification { def stream = factory.create(new ServerAddress()) when: - stream.open() + stream.open(OPERATION_CONTEXT) then: thrown(MongoSocketOpenException) @@ -95,7 +96,7 @@ class NettyStreamSpecification extends Specification { def callback = new CallbackErrorHolder() when: - stream.openAsync(callback) + stream.openAsync(OPERATION_CONTEXT, callback) then: callback.getError().is(exception) diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java new file mode 100644 index 00000000000..17b1a1c4a7e --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java @@ -0,0 +1,145 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; + +import static org.bson.assertions.Assertions.notNull; + +public final class AsyncOperationContextBinding implements AsyncReadWriteBinding { + + private final AsyncReadWriteBinding wrapped; + private final OperationContext operationContext; + + public AsyncOperationContextBinding(final AsyncReadWriteBinding wrapped, final OperationContext operationContext) { + this.wrapped = notNull("wrapped", wrapped); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getWriteConnectionSource(final SingleResultCallback callback) { + wrapped.getWriteConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + wrapped.getReadConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + private class SessionBindingAsyncConnectionSource implements AsyncConnectionSource { + private final AsyncConnectionSource wrapped; + + SessionBindingAsyncConnectionSource(final AsyncConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getConnection(final SingleResultCallback callback) { + wrapped.getConnection(callback); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncConnectionSource retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + } + + public AsyncReadWriteBinding getWrapped() { + return wrapped; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java index ea56301e8cb..fa588a340d0 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java +++ 
b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java @@ -17,25 +17,21 @@ package com.mongodb.internal.binding; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import static org.bson.assertions.Assertions.notNull; public final class AsyncSessionBinding implements AsyncReadWriteBinding { private final AsyncReadWriteBinding wrapped; - private final SessionContext sessionContext; + private final OperationContext operationContext; public AsyncSessionBinding(final AsyncReadWriteBinding wrapped) { this.wrapped = notNull("wrapped", wrapped); - this.sessionContext = new SimpleSessionContext(); + this.operationContext = wrapped.getOperationContext().withSessionContext(new SimpleSessionContext()); } @Override @@ -54,25 +50,9 @@ public void getWriteConnectionSource(final SingleResultCallback> OPERATION_CONTEXT def binding = new AsyncSessionBinding(wrapped) when: @@ -63,10 +66,10 @@ class AsyncSessionBindingSpecification extends Specification { 1 * wrapped.getWriteConnectionSource(_) when: - def context = binding.getSessionContext() + def context = binding.getOperationContext().getSessionContext() then: - 0 * wrapped.getSessionContext() + 0 * wrapped.getOperationContext().getSessionContext() context instanceof SimpleSessionContext } diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java index ca783beb2df..3fff8b66e06 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java @@ -19,20 +19,14 @@ import com.mongodb.MongoInternalException; import com.mongodb.MongoTimeoutException; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; import com.mongodb.connection.ServerDescription; -import com.mongodb.internal.IgnorableRequestContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Cluster; -import com.mongodb.internal.connection.NoOpSessionContext; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.Server; import com.mongodb.internal.selector.ReadPreferenceServerSelector; import com.mongodb.internal.selector.WritableServerSelector; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -54,35 +48,18 @@ public class AsyncSingleConnectionBinding extends AbstractReferenceCounted imple private volatile Server writeServer; private volatile ServerDescription readServerDescription; private volatile ServerDescription writeServerDescription; - @Nullable - private final ServerApi serverApi; - private final OperationContext operationContext = new OperationContext(); + private final OperationContext operationContext; /** * Create a new binding with the given cluster. 
- * @param cluster a non-null Cluster which will be used to select a server to bind to - * @param maxWaitTime the maximum time to wait for a connection to become available. - * @param timeUnit a non-null TimeUnit for the maxWaitTime - * @param serverApi the server api, which may be null - */ - public AsyncSingleConnectionBinding(final Cluster cluster, final long maxWaitTime, final TimeUnit timeUnit, - @Nullable final ServerApi serverApi) { - this(cluster, primary(), maxWaitTime, timeUnit, serverApi); - } - - /** - * Create a new binding with the given cluster. - * @param cluster a non-null Cluster which will be used to select a server to bind to + * + * @param cluster a non-null Cluster which will be used to select a server to bind to * @param readPreference the readPreference for reads, if not primary a separate connection will be used for reads - * @param maxWaitTime the maximum time to wait for a connection to become available. - * @param timeUnit a non-null TimeUnit for the maxWaitTime - * @param serverApi the server api, which may be null + * @param operationContext the operation context */ - public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, - final long maxWaitTime, final TimeUnit timeUnit, @Nullable final ServerApi serverApi) { - this.serverApi = serverApi; - + public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, final OperationContext operationContext) { notNull("cluster", cluster); + this.operationContext = operationContext; this.readPreference = notNull("readPreference", readPreference); CountDownLatch latch = new CountDownLatch(2); cluster.selectServerAsync(new WritableServerSelector(), operationContext, (result, t) -> { @@ -100,7 +77,7 @@ public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference } }); - awaitLatch(maxWaitTime, timeUnit, latch); + awaitLatch(latch); if (writeServer == null || readServer == null) { throw new MongoInternalException("Failure to select server"); @@ -112,7 +89,7 @@ public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference writeServerLatch.countDown(); }); - awaitLatch(maxWaitTime, timeUnit, writeServerLatch); + awaitLatch(writeServerLatch); if (writeConnection == null) { throw new MongoInternalException("Failure to get connection"); @@ -124,16 +101,16 @@ public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference readConnection = result; readServerLatch.countDown(); }); - awaitLatch(maxWaitTime, timeUnit, readServerLatch); + awaitLatch(readServerLatch); if (readConnection == null) { throw new MongoInternalException("Failure to get connection"); } } - private void awaitLatch(final long maxWaitTime, final TimeUnit timeUnit, final CountDownLatch latch) { + private void awaitLatch(final CountDownLatch latch) { try { - if (!latch.await(maxWaitTime, timeUnit)) { + if (!latch.await(operationContext.getTimeoutContext().timeoutOrAlternative(10000), TimeUnit.MILLISECONDS)) { throw new MongoTimeoutException("Failed to get servers"); } } catch (InterruptedException e) { @@ -152,22 +129,6 @@ public ReadPreference getReadPreference() { return readPreference; } - @Override - public SessionContext getSessionContext() { - return NoOpSessionContext.INSTANCE; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return IgnorableRequestContext.INSTANCE; - } - @Override public OperationContext getOperationContext() { return 
operationContext; @@ -221,22 +182,6 @@ public ServerDescription getServerDescription() { return serverDescription; } - @Override - public SessionContext getSessionContext() { - return NoOpSessionContext.INSTANCE; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return IgnorableRequestContext.INSTANCE; - } - @Override public OperationContext getOperationContext() { return operationContext; diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java new file mode 100644 index 00000000000..6af3f4520d4 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; + +import static org.bson.assertions.Assertions.notNull; + +public class OperationContextBinding implements ReadWriteBinding { + private final ReadWriteBinding wrapped; + private final OperationContext operationContext; + + public OperationContextBinding(final ReadWriteBinding wrapped, final OperationContext operationContext) { + this.wrapped = notNull("wrapped", wrapped); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public ReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new SessionBindingConnectionSource(wrapped.getReadConnectionSource()); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + return new SessionBindingConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference)); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ConnectionSource getWriteConnectionSource() { + return new SessionBindingConnectionSource(wrapped.getWriteConnectionSource()); + } + + private class SessionBindingConnectionSource implements ConnectionSource { + private ConnectionSource wrapped; + + SessionBindingConnectionSource(final ConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext 
getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public Connection getConnection() { + return wrapped.getConnection(); + } + + @Override + public ConnectionSource retain() { + wrapped = wrapped.retain(); + return this; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + return wrapped.release(); + } + } + + public ReadWriteBinding getWrapped() { + return wrapped; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java index 4005a56af2b..3a2666a8093 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java @@ -17,23 +17,19 @@ package com.mongodb.internal.binding; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; -import com.mongodb.internal.connection.OperationContext; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; +import com.mongodb.internal.connection.OperationContext; import static org.bson.assertions.Assertions.notNull; public class SessionBinding implements ReadWriteBinding { private final ReadWriteBinding wrapped; - private final SessionContext sessionContext; + private final OperationContext operationContext; public SessionBinding(final ReadWriteBinding wrapped) { this.wrapped = notNull("wrapped", wrapped); - this.sessionContext = new SimpleSessionContext(); + this.operationContext = wrapped.getOperationContext().withSessionContext(new SimpleSessionContext()); } @Override @@ -67,25 +63,9 @@ public ConnectionSource getReadConnectionSource(final int minWireVersion, final return new SessionBindingConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference)); } - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); - } - @Override public OperationContext getOperationContext() { - return wrapped.getOperationContext(); + return operationContext; } @Override @@ -105,24 +85,9 @@ public ServerDescription getServerDescription() { return wrapped.getServerDescription(); } - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - @Override public OperationContext getOperationContext() { - return wrapped.getOperationContext(); - } - - @Override - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); + return operationContext; } @Override diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java index bff96ee9941..ee258fb28cf 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java @@ -28,13 +28,13 @@ import 
java.util.UUID; -class SimpleSessionContext implements SessionContext { +public class SimpleSessionContext implements SessionContext { private final BsonDocument sessionId; private BsonTimestamp operationTime; private long counter; private BsonDocument clusterTime; - SimpleSessionContext() { + public SimpleSessionContext() { this.sessionId = createNewServerSessionIdentifier(); } diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java index e371003fc75..6bf3cff636d 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java @@ -17,19 +17,13 @@ package com.mongodb.internal.binding; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; -import com.mongodb.internal.connection.OperationContext; import com.mongodb.connection.ServerDescription; -import com.mongodb.internal.IgnorableRequestContext; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.connection.NoOpSessionContext; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.ServerTuple; import com.mongodb.internal.selector.ReadPreferenceServerSelector; import com.mongodb.internal.selector.WritableServerSelector; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.isTrue; @@ -47,8 +41,6 @@ public class SingleConnectionBinding implements ReadWriteBinding { private final ServerDescription readServerDescription; private final ServerDescription writeServerDescription; private int count = 1; - @Nullable - private final ServerApi serverApi; private final OperationContext operationContext; /** @@ -56,12 +48,12 @@ public class SingleConnectionBinding implements ReadWriteBinding { * * @param cluster a non-null Cluster which will be used to select a server to bind to * @param readPreference the readPreference for reads, if not primary a separate connection will be used for reads + * */ - public SingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, @Nullable final ServerApi serverApi) { - this.serverApi = serverApi; - operationContext = new OperationContext(); + public SingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, final OperationContext operationContext) { notNull("cluster", cluster); this.readPreference = notNull("readPreference", readPreference); + this.operationContext = operationContext; ServerTuple writeServerTuple = cluster.selectServer(new WritableServerSelector(), operationContext); writeServerDescription = writeServerTuple.getServerDescription(); writeConnection = writeServerTuple.getServer().getConnection(operationContext); @@ -112,22 +104,6 @@ public ConnectionSource getReadConnectionSource(final int minWireVersion, final throw new UnsupportedOperationException(); } - @Override - public SessionContext getSessionContext() { - return NoOpSessionContext.INSTANCE; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return IgnorableRequestContext.INSTANCE; - } - @Override public OperationContext 
getOperationContext() { return operationContext; @@ -155,26 +131,11 @@ public ServerDescription getServerDescription() { return serverDescription; } - @Override - public SessionContext getSessionContext() { - return NoOpSessionContext.INSTANCE; - } - @Override public OperationContext getOperationContext() { return operationContext; } - @Override - public ServerApi getServerApi() { - return serverApi; - } - - @Override - public RequestContext getRequestContext() { - return IgnorableRequestContext.INSTANCE; - } - @Override public ReadPreference getReadPreference() { return readPreference; diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy index b857c2574bd..0ac6b8fd9df 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy @@ -13,6 +13,7 @@ import util.spock.annotations.Slow import java.util.concurrent.CountDownLatch +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getSslSettings import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -39,7 +40,7 @@ class AsyncSocketChannelStreamSpecification extends Specification { def stream = factory.create(new ServerAddress('host1')) when: - stream.open() + stream.open(OPERATION_CONTEXT) then: !stream.isClosed() @@ -65,7 +66,7 @@ class AsyncSocketChannelStreamSpecification extends Specification { def stream = factory.create(new ServerAddress()) when: - stream.open() + stream.open(OPERATION_CONTEXT) then: thrown(MongoSocketOpenException) @@ -89,7 +90,7 @@ class AsyncSocketChannelStreamSpecification extends Specification { def callback = new CallbackErrorHolder() when: - stream.openAsync(callback) + stream.openAsync(OPERATION_CONTEXT, callback) then: callback.getError().is(exception) diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy index 858b5ce6c84..6efe88806e8 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy @@ -18,7 +18,6 @@ package com.mongodb.internal.connection import com.mongodb.LoggerSettings import com.mongodb.MongoSocketOpenException -import com.mongodb.MongoSocketReadTimeoutException import com.mongodb.OperationFunctionalSpecification import com.mongodb.ServerAddress import com.mongodb.connection.ClusterConnectionMode @@ -26,26 +25,20 @@ import com.mongodb.connection.ClusterId import com.mongodb.connection.ServerId import com.mongodb.connection.SocketSettings import com.mongodb.internal.connection.netty.NettyStreamFactory -import org.bson.BsonDocument -import org.bson.BsonInt32 -import org.bson.BsonString import spock.lang.IgnoreIf import util.spock.annotations.Slow import java.util.concurrent.TimeUnit -import static com.mongodb.ClusterFixture.getClusterConnectionMode +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getCredentialWithCache -import static com.mongodb.ClusterFixture.getPrimary import static 
com.mongodb.ClusterFixture.getServerApi import static com.mongodb.ClusterFixture.getSslSettings -import static com.mongodb.internal.connection.CommandHelper.executeCommand @Slow class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification { static SocketSettings openSocketSettings = SocketSettings.builder().connectTimeout(1, TimeUnit.MILLISECONDS).build() - static SocketSettings readSocketSettings = SocketSettings.builder().readTimeout(5, TimeUnit.SECONDS).build() @IgnoreIf({ getSslSettings().isEnabled() }) def 'should throw a MongoSocketOpenException when the AsynchronousSocket Stream fails to open'() { @@ -56,35 +49,12 @@ class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification .create(new ServerId(new ClusterId(), new ServerAddress(new InetSocketAddress('192.168.255.255', 27017)))) when: - connection.open() + connection.open(OPERATION_CONTEXT) then: thrown(MongoSocketOpenException) } - @IgnoreIf({ getSslSettings().isEnabled() }) - def 'should throw a MongoSocketReadTimeoutException with the AsynchronousSocket stream'() { - given: - def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, - new AsynchronousSocketChannelStreamFactory(new DefaultInetAddressResolver(), readSocketSettings, getSslSettings()), - getCredentialWithCache(), null, null, - [], LoggerSettings.builder().build(), null, getServerApi()).create(new ServerId(new ClusterId(), getPrimary())) - connection.open() - - getCollectionHelper().insertDocuments(new BsonDocument('_id', new BsonInt32(1))) - def countCommand = new BsonDocument('count', new BsonString(getCollectionName())) - countCommand.put('query', new BsonDocument('$where', new BsonString('sleep(5050); return true;'))) - - when: - executeCommand(getDatabaseName(), countCommand, getClusterConnectionMode(), getServerApi(), connection) - - then: - thrown(MongoSocketReadTimeoutException) - - cleanup: - connection?.close() - } - def 'should throw a MongoSocketOpenException when the Netty Stream fails to open'() { given: def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, @@ -93,32 +63,10 @@ class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification new ServerAddress(new InetSocketAddress('192.168.255.255', 27017)))) when: - connection.open() + connection.open(OPERATION_CONTEXT) then: thrown(MongoSocketOpenException) } - - def 'should throw a MongoSocketReadTimeoutException with the Netty stream'() { - given: - def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, - new NettyStreamFactory(readSocketSettings, getSslSettings()), getCredentialWithCache(), null, null, - [], LoggerSettings.builder().build(), null, getServerApi()).create(new ServerId(new ClusterId(), getPrimary())) - connection.open() - - getCollectionHelper().insertDocuments(new BsonDocument('_id', new BsonInt32(1))) - def countCommand = new BsonDocument('count', new BsonString(getCollectionName())) - countCommand.put('query', new BsonDocument('$where', new BsonString('sleep(5050); return true;'))) - - when: - executeCommand(getDatabaseName(), countCommand, getClusterConnectionMode(), getServerApi(), connection) - - then: - thrown(MongoSocketReadTimeoutException) - - cleanup: - connection?.close() - } - } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy index 21979eb87ce..8dd53bc1c03 100644 --- 
a/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy @@ -19,6 +19,7 @@ import spock.lang.Specification import java.util.function.Supplier import static com.mongodb.AuthenticationMechanism.MONGODB_AWS +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getClusterConnectionMode import static com.mongodb.ClusterFixture.getConnectionString import static com.mongodb.ClusterFixture.getCredential @@ -51,7 +52,7 @@ class AwsAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: thrown(MongoCommandException) @@ -70,7 +71,7 @@ class AwsAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: true @@ -100,7 +101,7 @@ class AwsAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: true @@ -159,10 +160,10 @@ class AwsAuthenticationSpecification extends Specification { private static void openConnection(final InternalConnection connection, final boolean async) { if (async) { FutureResultCallback futureResultCallback = new FutureResultCallback() - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS) } else { - connection.open() + connection.open(OPERATION_CONTEXT) } } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy index 6f005eb9733..085a5100198 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy @@ -30,6 +30,7 @@ import spock.lang.Specification import java.util.concurrent.CountDownLatch import static com.mongodb.ClusterFixture.LEGACY_HELLO +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getClusterConnectionMode import static com.mongodb.ClusterFixture.getCredentialWithCache import static com.mongodb.ClusterFixture.getPrimary @@ -45,7 +46,7 @@ class CommandHelperSpecification extends Specification { new NettyStreamFactory(SocketSettings.builder().build(), getSslSettings()), getCredentialWithCache(), null, null, [], LoggerSettings.builder().build(), null, getServerApi()) .create(new ServerId(new ClusterId(), getPrimary())) - connection.open() + connection.open(OPERATION_CONTEXT) } def cleanup() { @@ -58,7 +59,7 @@ class CommandHelperSpecification extends Specification { Throwable receivedException = null def latch1 = new CountDownLatch(1) 
executeCommandAsync('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), getClusterConnectionMode(), - getServerApi(), connection) + getServerApi(), connection, OPERATION_CONTEXT) { document, exception -> receivedDocument = document; receivedException = exception; latch1.countDown() } latch1.await() @@ -70,7 +71,7 @@ class CommandHelperSpecification extends Specification { when: def latch2 = new CountDownLatch(1) executeCommandAsync('admin', new BsonDocument('non-existent-command', new BsonInt32(1)), getClusterConnectionMode(), - getServerApi(), connection) + getServerApi(), connection, OPERATION_CONTEXT) { document, exception -> receivedDocument = document; receivedException = exception; latch2.countDown() } latch2.await() diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java index 919e0b130a8..56122ec64af 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java @@ -26,11 +26,13 @@ import com.mongodb.connection.ConnectionPoolSettings; import com.mongodb.connection.ServerId; import com.mongodb.event.ConnectionCreatedEvent; -import com.mongodb.internal.time.Timeout; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.inject.EmptyProvider; import com.mongodb.internal.inject.OptionalProvider; import com.mongodb.internal.inject.SameObjectProvider; +import com.mongodb.internal.time.TimePointTest; +import com.mongodb.internal.time.Timeout; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; @@ -58,6 +60,11 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Stream; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; import static java.lang.Long.MAX_VALUE; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.MINUTES; @@ -110,14 +117,14 @@ public void shouldThrowOnTimeout() throws InterruptedException { provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, ConnectionPoolSettings.builder() .maxSize(1) - .maxWaitTime(50, MILLISECONDS) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); - provider.get(new OperationContext()); + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(50); + provider.get(createOperationContext(timeoutSettings)); // when - TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(provider); + TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(provider, timeoutSettings); new Thread(connectionGetter).start(); connectionGetter.getLatch().await(); @@ -131,17 +138,16 @@ public void shouldThrowOnPoolClosed() { provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, ConnectionPoolSettings.builder() .maxSize(1) - .maxWaitTime(50, MILLISECONDS) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); 
provider.close(); String expectedExceptionMessage = "The server at 127.0.0.1:27017 is no longer available"; MongoServerUnavailableException exception; - exception = assertThrows(MongoServerUnavailableException.class, () -> provider.get(new OperationContext())); + exception = assertThrows(MongoServerUnavailableException.class, () -> provider.get(OPERATION_CONTEXT)); assertEquals(expectedExceptionMessage, exception.getMessage()); SupplyingCallback supplyingCallback = new SupplyingCallback<>(); - provider.getAsync(new OperationContext(), supplyingCallback); + provider.getAsync(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)), supplyingCallback); exception = assertThrows(MongoServerUnavailableException.class, supplyingCallback::get); assertEquals(expectedExceptionMessage, exception.getMessage()); } @@ -155,14 +161,14 @@ public void shouldExpireConnectionAfterMaxLifeTime() throws InterruptedException .maintenanceInitialDelay(5, MINUTES) .maxConnectionLifeTime(50, MILLISECONDS) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); // when - provider.get(new OperationContext()).close(); + provider.get(OPERATION_CONTEXT).close(); Thread.sleep(100); provider.doMaintenance(); - provider.get(new OperationContext()); + provider.get(OPERATION_CONTEXT); // then assertTrue(connectionFactory.getNumCreatedConnections() >= 2); // should really be two, but it's racy @@ -176,11 +182,11 @@ public void shouldExpireConnectionAfterLifeTimeOnClose() throws InterruptedExcep ConnectionPoolSettings.builder() .maxSize(1) .maxConnectionLifeTime(20, MILLISECONDS).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); // when - InternalConnection connection = provider.get(new OperationContext()); + InternalConnection connection = provider.get(OPERATION_CONTEXT); Thread.sleep(50); connection.close(); @@ -197,14 +203,14 @@ public void shouldExpireConnectionAfterMaxIdleTime() throws InterruptedException .maxSize(1) .maintenanceInitialDelay(5, MINUTES) .maxConnectionIdleTime(50, MILLISECONDS).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); // when - provider.get(new OperationContext()).close(); + provider.get(OPERATION_CONTEXT).close(); Thread.sleep(100); provider.doMaintenance(); - provider.get(new OperationContext()); + provider.get(OPERATION_CONTEXT); // then assertTrue(connectionFactory.getNumCreatedConnections() >= 2); // should really be two, but it's racy @@ -219,14 +225,14 @@ public void shouldCloseConnectionAfterExpiration() throws InterruptedException { .maxSize(1) .maintenanceInitialDelay(5, MINUTES) .maxConnectionLifeTime(20, MILLISECONDS).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); // when - provider.get(new OperationContext()).close(); + provider.get(OPERATION_CONTEXT).close(); Thread.sleep(50); provider.doMaintenance(); - provider.get(new OperationContext()); + provider.get(OPERATION_CONTEXT); // then assertTrue(connectionFactory.getCreatedConnections().get(0).isClosed()); @@ -241,14 +247,14 @@ public void shouldCreateNewConnectionAfterExpiration() throws InterruptedExcepti .maxSize(1) .maintenanceInitialDelay(5, MINUTES) .maxConnectionLifeTime(20, MILLISECONDS).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); // when - provider.get(new OperationContext()).close(); + provider.get(OPERATION_CONTEXT).close(); Thread.sleep(50); provider.doMaintenance(); 
- InternalConnection secondConnection = provider.get(new OperationContext()); + InternalConnection secondConnection = provider.get(OPERATION_CONTEXT); // then assertNotNull(secondConnection); @@ -265,9 +271,9 @@ public void shouldPruneAfterMaintenanceTaskRuns() throws InterruptedException { .maxConnectionLifeTime(1, MILLISECONDS) .maintenanceInitialDelay(5, MINUTES) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); - provider.get(new OperationContext()).close(); + provider.get(OPERATION_CONTEXT).close(); // when @@ -282,12 +288,12 @@ public void shouldPruneAfterMaintenanceTaskRuns() throws InterruptedException { void infiniteMaxSize() { int defaultMaxSize = ConnectionPoolSettings.builder().build().getMaxSize(); provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, - ConnectionPoolSettings.builder().maxSize(0).build(), EmptyProvider.instance()); + ConnectionPoolSettings.builder().maxSize(0).build(), EmptyProvider.instance(), OPERATION_CONTEXT_FACTORY); provider.ready(); List connections = new ArrayList<>(); try { for (int i = 0; i < 2 * defaultMaxSize; i++) { - connections.add(provider.get(new OperationContext())); + connections.add(provider.get(OPERATION_CONTEXT)); } } finally { connections.forEach(connection -> { @@ -313,18 +319,17 @@ public void concurrentUsage(final int minSize, final int maxSize, final boolean ConnectionPoolSettings.builder() .minSize(minSize) .maxSize(maxSize) - .maxWaitTime(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS) .maintenanceInitialDelay(0, NANOSECONDS) .maintenanceFrequency(100, MILLISECONDS) .maxConnectionLifeTime(limitConnectionLifeIdleTime ? 350 : 0, MILLISECONDS) .maxConnectionIdleTime(limitConnectionLifeIdleTime ? 50 : 0, MILLISECONDS) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); assertUseConcurrently(provider, concurrentUsersCount, checkoutSync, checkoutAsync, invalidateAndReadyProb, invalidateProb, readyProb, - cachedExecutor, SECONDS.toNanos(10)); + cachedExecutor, SECONDS.toNanos(10), TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS)); } private static Stream concurrentUsageArguments() { @@ -352,17 +357,17 @@ public void callbackShouldNotBlockCheckoutIfOpenAsyncWorksNotInCurrentThread() t ConnectionPoolSettings.builder() .maxSize(DEFAULT_MAX_CONNECTING + maxAvailableConnections) .addConnectionPoolListener(listener) - .maxWaitTime(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS) .maintenanceInitialDelay(MAX_VALUE, NANOSECONDS) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS); acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_CALLBACK, - controllableConnFactory, listener); + controllableConnFactory, listener, timeoutSettings); assertUseConcurrently(provider, 2 * maxAvailableConnections, true, true, 0.02f, 0, 0, - cachedExecutor, SECONDS.toNanos(10)); + cachedExecutor, SECONDS.toNanos(10), timeoutSettings); } /** @@ -391,16 +396,17 @@ public void checkoutHandOverMechanism() throws InterruptedException, TimeoutExce * the max pool size, and then check that no connections were created nonetheless. 
*/ + maxConcurrentlyHandedOver) .addConnectionPoolListener(listener) - .maxWaitTime(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS) .maintenanceInitialDelay(MAX_VALUE, NANOSECONDS) .build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); List connections = new ArrayList<>(); for (int i = 0; i < openConnectionsCount; i++) { - connections.add(provider.get(new OperationContext(), 0, NANOSECONDS)); + connections.add(provider.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(0)))); } - acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_OPEN, controllableConnFactory, listener); + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS); + acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_OPEN, controllableConnFactory, listener, + timeoutSettings); int previousIdx = 0; // concurrently check in / check out and assert the hand-over mechanism works for (int idx = 0; idx < connections.size(); idx += maxConcurrentlyHandedOver) { @@ -416,7 +422,8 @@ public void checkoutHandOverMechanism() throws InterruptedException, TimeoutExce return connectionId; })); Runnable checkOut = () -> receivedFutures.add(cachedExecutor.submit(() -> { - InternalConnection connection = provider.get(new OperationContext(), TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS); + InternalConnection connection = + provider.get(createOperationContext(timeoutSettings)); return connection.getDescription().getConnectionId(); })); if (ThreadLocalRandom.current().nextBoolean()) { @@ -449,7 +456,7 @@ public void readyAfterCloseMustNotThrow() { SERVER_ID, connectionFactory, ConnectionPoolSettings.builder().maxSize(1).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.close(); provider.ready(); } @@ -460,7 +467,7 @@ public void invalidateAfterCloseMustNotThrow() { SERVER_ID, connectionFactory, ConnectionPoolSettings.builder().maxSize(1).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); provider.ready(); provider.close(); provider.invalidate(null); @@ -474,7 +481,7 @@ public void readyInvalidateConcurrentWithCloseMustNotThrow() throws ExecutionExc SERVER_ID, connectionFactory, ConnectionPoolSettings.builder().maxSize(1).build(), - mockSdamProvider()); + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); try { readyAndInvalidateResult = cachedExecutor.submit(() -> { provider.ready(); @@ -490,14 +497,15 @@ public void readyInvalidateConcurrentWithCloseMustNotThrow() throws ExecutionExc } private static void assertUseConcurrently(final DefaultConnectionPool pool, final int concurrentUsersCount, - final boolean sync, final boolean async, - final float invalidateAndReadyProb, final float invalidateProb, final float readyProb, - final ExecutorService executor, final long durationNanos) throws InterruptedException { + final boolean sync, final boolean async, + final float invalidateAndReadyProb, final float invalidateProb, final float readyProb, + final ExecutorService executor, final long durationNanos, + final TimeoutSettings timeoutSettings) throws InterruptedException { try { useConcurrently(pool, concurrentUsersCount, sync, async, invalidateAndReadyProb, invalidateProb, readyProb, - executor, durationNanos); + executor, durationNanos, timeoutSettings); } catch (TimeoutException | ExecutionException e) { throw new AssertionError(e); } @@ -506,7 +514,8 @@ private static void assertUseConcurrently(final DefaultConnectionPool pool, 
fina private static void useConcurrently(final DefaultConnectionPool pool, final int concurrentUsersCount, final boolean checkoutSync, final boolean checkoutAsync, final float invalidateAndReadyProb, final float invalidateProb, final float readyProb, - final ExecutorService executor, final long durationNanos) + final ExecutorService executor, final long durationNanos, + final TimeoutSettings timeoutSettings) throws ExecutionException, InterruptedException, TimeoutException { assertTrue(invalidateAndReadyProb >= 0 && invalidateAndReadyProb <= 1); Runnable spontaneouslyInvalidateReady = () -> { @@ -522,15 +531,18 @@ private static void useConcurrently(final DefaultConnectionPool pool, final int } }; Collection> tasks = new ArrayList<>(); - Timeout duration = Timeout.startNow(durationNanos); + Timeout timeout = Timeout.expiresIn(durationNanos, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); for (int i = 0; i < concurrentUsersCount; i++) { if ((checkoutSync && checkoutAsync) ? i % 2 == 0 : checkoutSync) {//check out synchronously and check in tasks.add(executor.submit(() -> { - while (!(duration.expired() || Thread.currentThread().isInterrupted())) { + while (!Thread.currentThread().isInterrupted()) { + if (timeout.call(NANOSECONDS, () -> false, (ns) -> false, () -> true)) { + break; + } spontaneouslyInvalidateReady.run(); InternalConnection conn = null; try { - conn = pool.get(new OperationContext(), TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS); + conn = pool.get(createOperationContext(timeoutSettings)); } catch (MongoConnectionPoolClearedException e) { // expected because we spontaneously invalidate `pool` } finally { @@ -542,10 +554,13 @@ private static void useConcurrently(final DefaultConnectionPool pool, final int })); } else if (checkoutAsync) {//check out asynchronously and check in tasks.add(executor.submit(() -> { - while (!(duration.expired() || Thread.currentThread().isInterrupted())) { + while (!Thread.currentThread().isInterrupted()) { + if (TimePointTest.hasExpired(timeout)) { + break; + } spontaneouslyInvalidateReady.run(); CompletableFuture futureCheckOutCheckIn = new CompletableFuture<>(); - pool.getAsync(new OperationContext(), (conn, t) -> { + pool.getAsync(createOperationContext(timeoutSettings), (conn, t) -> { if (t != null) { if (t instanceof MongoConnectionPoolClearedException) { futureCheckOutCheckIn.complete(null); // expected because we spontaneously invalidate `pool` @@ -590,23 +605,24 @@ private static void sleepMillis(final long millis) { * This results in acquiring permits to open a connection and leaving them acquired. 
*/ private static void acquireOpenPermits(final DefaultConnectionPool pool, final int openPermitsCount, - final InfiniteCheckoutEmulation infiniteEmulation, - final ControllableConnectionFactory controllableConnFactory, - final TestConnectionPoolListener listener) throws TimeoutException, InterruptedException { + final InfiniteCheckoutEmulation infiniteEmulation, + final ControllableConnectionFactory controllableConnFactory, + final TestConnectionPoolListener listener, + final TimeoutSettings timeoutSettings) throws TimeoutException, InterruptedException { assertTrue(openPermitsCount <= DEFAULT_MAX_CONNECTING); int initialCreatedEventCount = listener.countEvents(ConnectionCreatedEvent.class); switch (infiniteEmulation) { case INFINITE_CALLBACK: { for (int i = 0; i < openPermitsCount; i++) { SingleResultCallback infiniteCallback = (result, t) -> sleepMillis(MAX_VALUE); - pool.getAsync(new OperationContext(), infiniteCallback); + pool.getAsync(createOperationContext(timeoutSettings), infiniteCallback); } break; } case INFINITE_OPEN: { controllableConnFactory.openDurationHandle.set(Duration.ofMillis(MAX_VALUE), openPermitsCount); for (int i = 0; i < openPermitsCount; i++) { - pool.getAsync(new OperationContext(), (result, t) -> {}); + pool.getAsync(createOperationContext(timeoutSettings), (result, t) -> {}); } controllableConnFactory.openDurationHandle.await(Duration.ofMillis(TEST_WAIT_TIMEOUT_MILLIS)); break; @@ -637,15 +653,15 @@ private static ControllableConnectionFactory newControllableConnectionFactory(fi doAnswer(invocation -> { doOpen.run(); return null; - }).when(connection).open(); + }).when(connection).open(any()); doAnswer(invocation -> { - SingleResultCallback callback = invocation.getArgument(0, SingleResultCallback.class); + SingleResultCallback callback = invocation.getArgument(1, SingleResultCallback.class); asyncOpenExecutor.execute(() -> { doOpen.run(); callback.onResult(null, null); }); return null; - }).when(connection).openAsync(any()); + }).when(connection).openAsync(any(), any()); return connection; }; return new ControllableConnectionFactory(connectionFactory, openDurationHandle); diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy index 6a78ce97f7c..cc3e0401bb5 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy @@ -36,6 +36,7 @@ import javax.security.auth.Subject import javax.security.auth.login.LoginContext import static com.mongodb.AuthenticationMechanism.GSSAPI +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getClusterConnectionMode import static com.mongodb.ClusterFixture.getConnectionString import static com.mongodb.ClusterFixture.getCredential @@ -57,7 +58,7 @@ class GSSAPIAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: thrown(MongoCommandException) @@ -76,7 +77,7 @@ class GSSAPIAuthenticationSpecification extends Specification { when: openConnection(connection, async) 
executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: true @@ -98,7 +99,7 @@ class GSSAPIAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: thrown(MongoSecurityException) @@ -130,7 +131,7 @@ class GSSAPIAuthenticationSpecification extends Specification { def connection = createConnection(async, getMongoCredential(subject)) openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: true @@ -174,7 +175,7 @@ class GSSAPIAuthenticationSpecification extends Specification { def connection = createConnection(async, getMongoCredential(saslClientProperties)) openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: true @@ -218,10 +219,10 @@ class GSSAPIAuthenticationSpecification extends Specification { private static void openConnection(final InternalConnection connection, final boolean async) { if (async) { FutureResultCallback futureResultCallback = new FutureResultCallback() - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS) } else { - connection.open() + connection.open(OPERATION_CONTEXT) } } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy index 9f2ca47b9ee..f18a6915e38 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy @@ -29,6 +29,7 @@ import spock.lang.Specification import javax.security.auth.login.LoginContext import static com.mongodb.AuthenticationMechanism.GSSAPI +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getLoginContextName import static com.mongodb.ClusterFixture.getPrimary import static com.mongodb.ClusterFixture.getServerApi @@ -53,7 +54,7 @@ class GSSAPIAuthenticatorSpecification extends Specification { .create(new ServerId(new ClusterId(), getPrimary())) when: - internalConnection.open() + internalConnection.open(OPERATION_CONTEXT) then: 1 * subjectProvider.getSubject() >> subject diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy index e57627ce325..e8c2a408220 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy +++ 
b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy @@ -32,6 +32,7 @@ import spock.lang.IgnoreIf import spock.lang.Specification import static com.mongodb.AuthenticationMechanism.PLAIN +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getClusterConnectionMode import static com.mongodb.ClusterFixture.getConnectionString import static com.mongodb.ClusterFixture.getCredential @@ -51,7 +52,7 @@ class PlainAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: thrown(MongoCommandException) @@ -70,7 +71,7 @@ class PlainAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: true @@ -89,7 +90,7 @@ class PlainAuthenticationSpecification extends Specification { when: openConnection(connection, async) executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), - getClusterConnectionMode(), null, connection) + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) then: thrown(MongoSecurityException) @@ -122,10 +123,10 @@ class PlainAuthenticationSpecification extends Specification { private static void openConnection(final InternalConnection connection, final boolean async) { if (async) { FutureResultCallback futureResultCallback = new FutureResultCallback() - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS) } else { - connection.open() + connection.open(OPERATION_CONTEXT) } } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java index e2377c8efef..6ab01fdfc8a 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java @@ -32,6 +32,7 @@ import java.util.Collections; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.ClusterFixture.getClusterConnectionMode; import static com.mongodb.ClusterFixture.getServerApi; import static com.mongodb.ClusterFixture.getSslSettings; @@ -67,14 +68,14 @@ public void tearDown() { public void testSuccessfulAuthentication() { PlainAuthenticator authenticator = new PlainAuthenticator(getCredentialWithCache(userName, source, password.toCharArray()), getClusterConnectionMode(), getServerApi()); - authenticator.authenticate(internalConnection, connectionDescription); + authenticator.authenticate(internalConnection, connectionDescription, OPERATION_CONTEXT); } @Test(expected = MongoSecurityException.class) public void testUnsuccessfulAuthentication() { PlainAuthenticator authenticator = new PlainAuthenticator(getCredentialWithCache(userName, source, "wrong".toCharArray()), getClusterConnectionMode(), getServerApi()); - authenticator.authenticate(internalConnection, 
connectionDescription); + authenticator.authenticate(internalConnection, connectionDescription, OPERATION_CONTEXT); } private static MongoCredentialWithCache getCredentialWithCache(final String userName, final String source, final char[] password) { diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy index 44205922a0a..faffded597e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy @@ -16,13 +16,11 @@ package com.mongodb.internal.connection - import com.mongodb.MongoCredential import com.mongodb.MongoSecurityException import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.async.FutureResultCallback -import com.mongodb.internal.IgnorableRequestContext import com.mongodb.internal.binding.AsyncClusterBinding import com.mongodb.internal.binding.ClusterBinding import com.mongodb.internal.operation.CommandReadOperation @@ -35,10 +33,10 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.createAsyncCluster import static com.mongodb.ClusterFixture.createCluster import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.getServerApi import static com.mongodb.ClusterFixture.isAuthenticated import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.MongoCredential.createCredential @@ -95,7 +93,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def dropUser(final String userName) { new CommandReadOperation<>('admin', new BsonDocument('dropUser', new BsonString(userName)), - new BsonDocumentCodec()).execute(getBinding()) + new BsonDocumentCodec()).execute(getBinding()) } def 'test authentication and authorization'() { @@ -105,8 +103,7 @@ class ScramSha256AuthenticationSpecification extends Specification { when: new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) - .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE)) + .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) then: noExceptionThrown() @@ -127,8 +124,7 @@ class ScramSha256AuthenticationSpecification extends Specification { // make this synchronous new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) - .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE), + .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), callback) callback.get() @@ -149,8 +145,7 @@ class ScramSha256AuthenticationSpecification extends Specification { when: new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) - .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - 
IgnorableRequestContext.INSTANCE)) + .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) then: thrown(MongoSecurityException) @@ -170,8 +165,8 @@ class ScramSha256AuthenticationSpecification extends Specification { when: new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) - .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE), callback) + .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), + callback) callback.get() then: @@ -191,8 +186,7 @@ class ScramSha256AuthenticationSpecification extends Specification { when: new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) - .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE)) + .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) then: noExceptionThrown() @@ -212,8 +206,8 @@ class ScramSha256AuthenticationSpecification extends Specification { when: new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) - .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), - IgnorableRequestContext.INSTANCE), callback) + .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), + callback) callback.get() then: diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java index 17dc3b6cfcf..0295e8c1f9f 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java @@ -23,6 +23,7 @@ import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.selector.ServerAddressSelector; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.ClusterFixture.getAsyncCluster; import static com.mongodb.ClusterFixture.getCluster; import static com.mongodb.assertions.Assertions.fail; @@ -52,7 +53,8 @@ public static void waitForLastRelease(final Cluster cluster) { } public static void waitForLastRelease(final ServerAddress address, final Cluster cluster) { - ConcurrentPool pool = getConnectionPool(address, cluster); + ConcurrentPool pool = connectionPool( + cluster.selectServer(new ServerAddressSelector(address), OPERATION_CONTEXT).getServer()); long startTime = System.currentTimeMillis(); while (pool.getInUseCount() > 0) { try { @@ -68,7 +70,7 @@ public static void waitForLastRelease(final ServerAddress address, final Cluster } private static ConcurrentPool getConnectionPool(final ServerAddress address, final Cluster cluster) { - return connectionPool(cluster.selectServer(new ServerAddressSelector(address), new OperationContext()).getServer()); + return connectionPool(cluster.selectServer(new ServerAddressSelector(address), OPERATION_CONTEXT).getServer()); } private static void checkPool(final ServerAddress address, final Cluster cluster) { diff --git 
a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy index 0f2ba70d4c0..266f4e88996 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy @@ -34,6 +34,7 @@ import org.bson.types.ObjectId import java.util.concurrent.CountDownLatch import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY import static com.mongodb.ClusterFixture.getClusterConnectionMode import static com.mongodb.ClusterFixture.getCredentialWithCache import static com.mongodb.ClusterFixture.getPrimary @@ -220,11 +221,12 @@ class ServerMonitorSpecification extends OperationFunctionalSpecification { } } serverMonitor = new DefaultServerMonitor(new ServerId(new ClusterId(), address), ServerSettings.builder().build(), - new InternalStreamConnectionFactory(SINGLE, new SocketStreamFactory(new DefaultInetAddressResolver(), + new InternalStreamConnectionFactory(SINGLE, new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().connectTimeout(500, TimeUnit.MILLISECONDS).build(), getSslSettings()), getCredentialWithCache(), null, null, [], LoggerSettings.builder().build(), null, getServerApi()), - getClusterConnectionMode(), getServerApi(), false, SameObjectProvider.initialized(sdam)) + getClusterConnectionMode(), getServerApi(), false, SameObjectProvider.initialized(sdam), + OPERATION_CONTEXT_FACTORY) serverMonitor.start() serverMonitor } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java index e715bfb5cd1..ae7166300e8 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java @@ -25,8 +25,6 @@ import com.mongodb.connection.ConnectionPoolSettings; import com.mongodb.connection.ServerSettings; import com.mongodb.connection.SocketSettings; -import com.mongodb.internal.IgnorableRequestContext; -import com.mongodb.internal.binding.StaticBindingContext; import com.mongodb.internal.selector.ServerAddressSelector; import com.mongodb.internal.validator.NoOpFieldNameValidator; import org.bson.BsonDocument; @@ -38,6 +36,8 @@ import java.util.Collections; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; import static com.mongodb.ClusterFixture.getCredential; import static com.mongodb.ClusterFixture.getDefaultDatabaseName; import static com.mongodb.ClusterFixture.getPrimary; @@ -66,8 +66,7 @@ private void setUpCluster(final ServerAddress serverAddress) { clusterSettings, new DefaultClusterableServerFactory(ServerSettings.builder().build(), ConnectionPoolSettings.builder().maxSize(1).build(), InternalConnectionPoolSettings.builder().build(), - streamFactory, streamFactory, getCredential(), - + OPERATION_CONTEXT_FACTORY, streamFactory, OPERATION_CONTEXT_FACTORY, streamFactory, getCredential(), LoggerSettings.builder().build(), null, null, null, Collections.emptyList(), getServerApi(), false)); } @@ -93,7 +92,7 @@ public void shouldGetServerWithOkDescription() { setUpCluster(getPrimary()); // when - 
ServerTuple serverTuple = cluster.selectServer(clusterDescription -> getPrimaries(clusterDescription), new OperationContext()); + ServerTuple serverTuple = cluster.selectServer(clusterDescription -> getPrimaries(clusterDescription), OPERATION_CONTEXT); // then assertTrue(serverTuple.getServerDescription().isOk()); @@ -102,17 +101,16 @@ public void shouldGetServerWithOkDescription() { @Test public void shouldSuccessfullyQueryASecondaryWithPrimaryReadPreference() { // given + OperationContext operationContext = OPERATION_CONTEXT; ServerAddress secondary = getSecondary(); setUpCluster(secondary); String collectionName = getClass().getName(); - Connection connection = cluster.selectServer(new ServerAddressSelector(secondary), new OperationContext()).getServer() - .getConnection(new OperationContext()); + Connection connection = cluster.selectServer(new ServerAddressSelector(secondary), operationContext).getServer() + .getConnection(operationContext); // when BsonDocument result = connection.command(getDefaultDatabaseName(), new BsonDocument("count", new BsonString(collectionName)), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), - new StaticBindingContext(NoOpSessionContext.INSTANCE, getServerApi(), IgnorableRequestContext.INSTANCE, - new OperationContext())); + new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), operationContext); // then assertEquals(new BsonDouble(1.0).intValue(), result.getNumber("ok").intValue()); diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy index ad5af2f6768..f652c2a0771 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy @@ -30,6 +30,9 @@ import javax.net.ssl.SSLSocket import javax.net.ssl.SSLSocketFactory import java.lang.reflect.Method +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.createOperationContext import static com.mongodb.ClusterFixture.getPrimary import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -44,8 +47,10 @@ class SocketStreamHelperSpecification extends Specification { .readTimeout(10, SECONDS) .build() + def operationContext = createOperationContext(TIMEOUT_SETTINGS.withReadTimeoutMS(socketSettings.getReadTimeout(MILLISECONDS))) + when: - SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketStreamHelper.initialize(operationContext, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), socketSettings, SslSettings.builder().build()) then: @@ -78,7 +83,7 @@ class SocketStreamHelperSpecification extends Specification { Socket socket = SocketFactory.default.createSocket() when: - SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), SocketSettings.builder().build(), SslSettings.builder().build()) then: @@ -94,8 +99,8 @@ class SocketStreamHelperSpecification extends 
Specification { SSLSocket socket = SSLSocketFactory.default.createSocket() when: - SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), SocketSettings. - builder().build(), sslSettings) + SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketSettings.builder().build(), sslSettings) then: socket.getSSLParameters().endpointIdentificationAlgorithm == (sslSettings.invalidHostNameAllowed ? null : 'HTTPS') @@ -115,7 +120,7 @@ class SocketStreamHelperSpecification extends Specification { SSLSocket socket = SSLSocketFactory.default.createSocket() when: - SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), SocketSettings.builder().build(), sslSettings) then: @@ -134,7 +139,7 @@ class SocketStreamHelperSpecification extends Specification { Socket socket = SocketFactory.default.createSocket() when: - SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), SocketSettings.builder().build(), SslSettings.builder().enabled(true).build()) then: diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy index 7fcf694723c..42886648a2c 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy @@ -1,18 +1,19 @@ package com.mongodb.internal.connection -import com.mongodb.spi.dns.InetAddressResolver -import util.spock.annotations.Slow import com.mongodb.MongoSocketOpenException import com.mongodb.ServerAddress import com.mongodb.connection.SocketSettings import com.mongodb.connection.SslSettings +import com.mongodb.spi.dns.InetAddressResolver import spock.lang.Ignore import spock.lang.IgnoreIf import spock.lang.Specification +import util.spock.annotations.Slow import javax.net.SocketFactory import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getSslSettings class StreamSocketAddressSpecification extends Specification { @@ -43,7 +44,7 @@ class StreamSocketAddressSpecification extends Specification { def socketStream = new SocketStream(serverAddress, null, socketSettings, sslSettings, socketFactory, bufferProvider) when: - socketStream.open() + socketStream.open(OPERATION_CONTEXT) then: !socket0.isConnected() @@ -82,7 +83,7 @@ class StreamSocketAddressSpecification extends Specification { def socketStream = new SocketStream(serverAddress, inetAddressResolver, socketSettings, sslSettings, socketFactory, bufferProvider) when: - socketStream.open() + socketStream.open(OPERATION_CONTEXT) then: thrown(MongoSocketOpenException) diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java b/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java index c8274f382fc..704dea56f44 100644 --- 
a/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java @@ -43,6 +43,8 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Predicate; +import java.util.stream.Collectors; import static com.mongodb.ClusterFixture.TIMEOUT; import static com.mongodb.internal.connection.InternalStreamConnection.getSecuritySensitiveCommands; @@ -178,29 +180,50 @@ public CommandFailedEvent getCommandFailedEvent(final String commandName) { .orElseThrow(() -> new IllegalArgumentException(commandName + " not found in command failed event list")); } - public List getCommandStartedEvents() { - return getCommandStartedEvents(Integer.MAX_VALUE); + public List getCommandFailedEvents() { + return getEvents(CommandFailedEvent.class, Integer.MAX_VALUE); } - private List getCommandStartedEvents(final int maxEvents) { + public List getCommandFailedEvents(final String commandName) { + return getEvents(CommandFailedEvent.class, + commandEvent -> commandEvent.getCommandName().equals(commandName), + Integer.MAX_VALUE); + } + + public List getCommandStartedEvents() { + return getEvents(CommandStartedEvent.class, Integer.MAX_VALUE); + } + + public List getCommandStartedEvents(final String commandName) { + return getEvents(CommandStartedEvent.class, + commandEvent -> commandEvent.getCommandName().equals(commandName), + Integer.MAX_VALUE); + } + + public List getCommandSucceededEvents() { + return getEvents(CommandSucceededEvent.class, Integer.MAX_VALUE); + } + + private List getEvents(final Class type, final int maxEvents) { + return getEvents(type, e -> true, maxEvents); + } + + private List getEvents(final Class type, + final Predicate filter, + final int maxEvents) { lock.lock(); try { - List commandStartedEvents = new ArrayList<>(); - for (CommandEvent cur : getEvents()) { - if (cur instanceof CommandStartedEvent) { - commandStartedEvents.add(cur); - } - if (commandStartedEvents.size() == maxEvents) { - break; - } - } - return commandStartedEvents; + return getEvents().stream() + .filter(e -> e.getClass() == type) + .filter(filter) + .map(type::cast) + .limit(maxEvents).collect(Collectors.toList()); } finally { lock.unlock(); } } - public List waitForStartedEvents(final int numEvents) { + public List waitForStartedEvents(final int numEvents) { lock.lock(); try { while (!hasCompletedEvents(numEvents)) { @@ -212,7 +235,7 @@ public List waitForStartedEvents(final int numEvents) { throw interruptAndCreateMongoInterruptedException("Interrupted waiting for event", e); } } - return getCommandStartedEvents(numEvents); + return getEvents(CommandStartedEvent.class, numEvents); } finally { lock.unlock(); } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy index 8477a91cc43..a3e309a1f5f 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy @@ -16,7 +16,7 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadConcern @@ -51,9 
+51,8 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import static TestOperationHelper.getKeyPattern +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.collectCursorResults -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getBinding @@ -67,8 +66,6 @@ import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS class AggregateOperationSpecification extends OperationFunctionalSpecification { @@ -87,8 +84,6 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { operation.getAllowDiskUse() == null operation.getBatchSize() == null operation.getCollation() == null - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 operation.getPipeline() == [] } @@ -102,15 +97,11 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { .batchSize(10) .collation(defaultCollation) .hint(hint) - .maxAwaitTime(10, MILLISECONDS) - .maxTime(10, MILLISECONDS) then: operation.getAllowDiskUse() operation.getBatchSize() == 10 operation.getCollation() == defaultCollation - operation.getMaxAwaitTime(MILLISECONDS) == 10 - operation.getMaxTime(MILLISECONDS) == 10 operation.getHint() == hint } @@ -142,18 +133,25 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { when: def pipeline = [new BsonDocument('$match', new BsonDocument('a', new BsonString('A')))] def operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) + + def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) + .append('pipeline', new BsonArray(pipeline)) + .append('cursor', new BsonDocument()) + + then: + testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) + + when: + operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) .allowDiskUse(true) .batchSize(10) .collation(defaultCollation) - .maxAwaitTime(15, MILLISECONDS) - .maxTime(10, MILLISECONDS) - def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) + expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) .append('pipeline', new BsonArray(pipeline)) .append('allowDiskUse', new BsonBoolean(true)) .append('collation', defaultCollation.asDocument()) .append('cursor', new BsonDocument('batchSize', new BsonInt32(10))) - .append('maxTimeMS', new BsonInt32(10)) then: testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) @@ -244,7 +242,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { results.containsAll(['Pete', 'Sam']) cleanup: - new DropCollectionOperation(viewNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding(getCluster())) + new DropCollectionOperation(viewNamespace, WriteConcern.ACKNOWLEDGED) + .execute(getBinding(getCluster())) 
where: async << [true, false] @@ -267,7 +266,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should allow disk usage'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).allowDiskUse(allowDiskUse) + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + .allowDiskUse(allowDiskUse) def cursor = operation.execute(getBinding()) then: @@ -279,7 +279,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should allow batch size'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).batchSize(batchSize) + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + .batchSize(batchSize) def cursor = operation.execute(getBinding()) then: @@ -289,25 +290,6 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { batchSize << [null, 0, 10] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).maxTime(1, SECONDS) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - @IgnoreIf({ serverVersionLessThan(3, 6) }) def 'should be able to explain an empty pipeline'() { given: @@ -367,8 +349,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) .comment(new BsonString(expectedComment)) @@ -381,50 +363,30 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { ((Document) profileDocument.get('command')).get('comment') == expectedComment cleanup: - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()).execute(getBinding()) profileCollectionHelper.drop() where: async << [true, false] } - @IgnoreIf({ isSharded() || serverVersionLessThan(3, 2) }) - def 'should be able to respect maxTime with pipeline'() { - given: - enableMaxTimeFailPoint() - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) - .maxTime(10, MILLISECONDS) - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should add read concern to command'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(ReadBinding) def source = Stub(ConnectionSource) def connection = Mock(Connection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + 
binding.operationContext >> operationContext binding.readConnectionSource >> source - binding.sessionContext >> sessionContext source.connection >> connection source.retain() >> source - source.getServerApi() >> null + source.operationContext >> operationContext def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) .append('pipeline', new BsonArray()) .append('cursor', new BsonDocument()) - appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) + appendReadConcernToCommand(operationContext.getSessionContext(), MIN_WIRE_VERSION, commandDocument) def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) @@ -434,7 +396,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.command(_, commandDocument, _, _, _, binding) >> + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) .append('ns', new BsonString(getNamespace().getFullName())) .append('firstBatch', new BsonArrayWrapper([]))) @@ -453,14 +415,13 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should add read concern to command asynchronously'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(AsyncReadBinding) def source = Stub(AsyncConnectionSource) def connection = Mock(AsyncConnection) - binding.serverApi >> null - binding.readPreference >> ReadPreference.primary() + binding.operationContext >> operationContext binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } - binding.sessionContext >> sessionContext - source.serverApi >> null + source.operationContext >> operationContext source.getConnection(_) >> { it[0].onResult(connection, null) } source.retain() >> source def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) @@ -476,7 +437,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> { + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> { it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) .append('ns', new BsonString(getNamespace().getFullName())) .append('firstBatch', new BsonArrayWrapper([]))), null) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy index a7aa377e855..496e7311949 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.MongoCommandException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.MongoWriteConcernException import 
com.mongodb.OperationFunctionalSpecification @@ -29,6 +28,7 @@ import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.Filters import com.mongodb.client.model.ValidationOptions import com.mongodb.client.test.CollectionHelper +import com.mongodb.internal.client.model.AggregationLevel import org.bson.BsonArray import org.bson.BsonBoolean import org.bson.BsonDocument @@ -40,17 +40,12 @@ import org.bson.codecs.BsonValueCodecProvider import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.client.model.Filters.gte -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders class AggregateToCollectionOperationSpecification extends OperationFunctionalSpecification { @@ -71,11 +66,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED) then: operation.getAllowDiskUse() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getPipeline() == pipeline operation.getBypassDocumentValidation() == null operation.getWriteConcern() == ACKNOWLEDGED @@ -87,15 +81,14 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, WriteConcern.MAJORITY) + AggregateToCollectionOperation operation = + createOperation(getNamespace(), pipeline, WriteConcern.MAJORITY) .allowDiskUse(true) - .maxTime(10, MILLISECONDS) .bypassDocumentValidation(true) .collation(defaultCollation) then: operation.getAllowDiskUse() - operation.getMaxTime(MILLISECONDS) == 10 operation.getBypassDocumentValidation() == true operation.getWriteConcern() == WriteConcern.MAJORITY operation.getCollation() == defaultCollation @@ -106,15 +99,13 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.DEFAULT) + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ReadConcern.DEFAULT) .allowDiskUse(true) - .maxTime(10, MILLISECONDS) .bypassDocumentValidation(true) .collation(defaultCollation) then: operation.getAllowDiskUse() - operation.getMaxTime(MILLISECONDS) == 10 operation.getBypassDocumentValidation() == true operation.getReadConcern() == ReadConcern.DEFAULT operation.getCollation() == defaultCollation @@ -122,7 +113,7 @@ 
class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should not accept an empty pipeline'() { when: - new AggregateToCollectionOperation(getNamespace(), [], ACKNOWLEDGED) + createOperation(getNamespace(), [], ACKNOWLEDGED) then: @@ -131,10 +122,9 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should be able to output to a collection'() { when: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], + ACKNOWLEDGED) execute(operation, async) then: @@ -147,9 +137,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to merge into a collection'() { when: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))]) + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))]) execute(operation, async) then: @@ -161,11 +150,9 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should be able to match then output to a collection'() { when: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), - new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), + new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], ACKNOWLEDGED) execute(operation, async) then: @@ -175,39 +162,15 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe async << [true, false] } - def 'should throw execution timeout exception from execute'() { - given: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), - new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) - .maxTime(1, SECONDS) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) def 'should throw on write concern error'() { given: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], new WriteConcern(5)) when: - async ? 
executeAsync(operation) : operation.execute(getBinding()) + execute(operation, async) then: def ex = thrown(MongoWriteConcernException) @@ -227,8 +190,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(BsonDocument.parse('{ level: 9 }')) when: - def operation = new AggregateToCollectionOperation(getNamespace(), [BsonDocument.parse('{$out: "collectionOut"}')], - ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [BsonDocument.parse('{$out: "collectionOut"}')], ACKNOWLEDGED) execute(operation, async) then: @@ -256,7 +219,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should create the expected command'() { when: def pipeline = [BsonDocument.parse('{$out: "collectionOut"}')] - def operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.MAJORITY, WriteConcern.MAJORITY) + AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, + ReadConcern.MAJORITY, WriteConcern.MAJORITY) .bypassDocumentValidation(true) def expectedCommand = new BsonDocument('aggregate', new BsonString(getNamespace().getCollectionName())) .append('pipeline', new BsonArray(pipeline)) @@ -298,7 +262,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(BsonDocument.parse('{_id: 1, str: "foo"}')) def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}'), new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] - def operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ACKNOWLEDGED).collation(defaultCollation) + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED) .collation(caseInsensitiveCollation) when: @@ -315,10 +279,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - def operation = new AggregateToCollectionOperation(getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [Aggregates.out('outputCollection').toBsonDocument(BsonDocument, registry)], ACKNOWLEDGED) .comment(new BsonString(expectedComment)) @@ -330,11 +294,24 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe ((Document) profileDocument.get('command')).get('comment') == expectedComment cleanup: - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()).execute(getBinding()) profileCollectionHelper.drop() where: async << [true, false] } + + def createOperation(final MongoNamespace namespace, final List pipeline) { + new AggregateToCollectionOperation(namespace, pipeline, null, null, AggregationLevel.COLLECTION) + } + + def createOperation(final MongoNamespace namespace, final List pipeline, 
final WriteConcern writeConcern) { + new AggregateToCollectionOperation(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION) + } + + def createOperation(final MongoNamespace namespace, final List pipeline, final ReadConcern readConcern) { + new AggregateToCollectionOperation(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION) + } + } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java index 3b8addf6596..93449a6558b 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java @@ -21,6 +21,7 @@ import com.mongodb.MongoQueryException; import com.mongodb.ReadPreference; import com.mongodb.ServerCursor; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.OperationTest; import com.mongodb.internal.binding.AsyncConnectionSource; @@ -105,7 +106,7 @@ void cleanup() { @DisplayName("server cursor should not be null") void theServerCursorShouldNotBeNull() { BsonDocument commandResult = executeFindCommand(2); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursor.getServerCursor()); @@ -115,7 +116,7 @@ void theServerCursorShouldNotBeNull() { @DisplayName("should get Exceptions for operations on the cursor after closing") void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() { BsonDocument commandResult = executeFindCommand(5); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); cursor.close(); @@ -130,7 +131,7 @@ void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() { @DisplayName("should throw an Exception when going off the end") void shouldThrowAnExceptionWhenGoingOffTheEnd() { BsonDocument commandResult = executeFindCommand(2, 1); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); cursorNext(); @@ -144,7 +145,7 @@ void shouldThrowAnExceptionWhenGoingOffTheEnd() { @DisplayName("test normal exhaustion") void testNormalExhaustion() { BsonDocument commandResult = executeFindCommand(); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(10, cursorFlatten().size()); @@ -155,7 +156,7 @@ void testNormalExhaustion() { @DisplayName("test limit exhaustion") void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) { BsonDocument commandResult = executeFindCommand(limit, batchSize); - cursor = new AsyncCommandBatchCursor<>(commandResult, batchSize, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, batchSize, 0, DOCUMENT_DECODER, null, 
connectionSource, connection); @@ -174,7 +175,7 @@ void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, fi BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData); - cursor = new AsyncCommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER, null, connectionSource, connection); assertFalse(cursor.isClosed()); @@ -197,7 +198,7 @@ void testTailableInterrupt() throws InterruptedException { BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); - cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); CountDownLatch latch = new CountDownLatch(1); @@ -230,7 +231,7 @@ void testTailableInterrupt() throws InterruptedException { void shouldKillCursorIfLimitIsReachedOnInitialQuery() { assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 10); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursorNext()); @@ -243,7 +244,7 @@ void shouldKillCursorIfLimitIsReachedOnInitialQuery() { void shouldKillCursorIfLimitIsReachedOnGetMore() { assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 3); - cursor = new AsyncCommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, null, connectionSource, connection); ServerCursor serverCursor = cursor.getServerCursor(); @@ -261,8 +262,9 @@ void shouldKillCursorIfLimitIsReachedOnGetMore() { @DisplayName("should release connection source if limit is reached on initial query") void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); @@ -275,7 +277,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 3); - cursor = new AsyncCommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursorNext()); @@ -288,7 +290,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { @DisplayName("test limit with get more") void testLimitWithGetMore() { BsonDocument commandResult = executeFindCommand(5, 2); - cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); 
assertNotNull(cursorNext()); @@ -311,7 +313,7 @@ void testLimitWithLargeDocuments() { ); BsonDocument commandResult = executeFindCommand(300, 0); - cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(300, cursorFlatten().size()); @@ -321,7 +323,7 @@ void testLimitWithLargeDocuments() { @DisplayName("should respect batch size") void shouldRespectBatchSize() { BsonDocument commandResult = executeFindCommand(2); - cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(2, cursor.getBatchSize()); @@ -338,7 +340,7 @@ void shouldRespectBatchSize() { @DisplayName("should throw cursor not found exception") void shouldThrowCursorNotFoundException() throws Throwable { BsonDocument commandResult = executeFindCommand(2); - cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); ServerCursor serverCursor = cursor.getServerCursor(); @@ -347,7 +349,7 @@ void shouldThrowCursorNotFoundException() throws Throwable { this.block(cb -> localConnection.commandAsync(getNamespace().getDatabaseName(), new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource, cb)); + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext(), cb)); localConnection.release(); cursorNext(); @@ -412,9 +414,8 @@ private BsonDocument executeFindCommand(final BsonDocument filter, final int lim } BsonDocument results = block(cb -> connection.commandAsync(getDatabaseName(), findCommand, - NO_OP_FIELD_NAME_VALIDATOR, readPreference, - CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), - connectionSource, cb)); + NO_OP_FIELD_NAME_VALIDATOR, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), + connectionSource.getOperationContext(), cb)); assertNotNull(results); return results; diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java new file mode 100644 index 00000000000..53b2d78eae2 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java @@ -0,0 +1,202 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerAddress; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.connection.ServerVersion; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.Decoder; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +class AsyncCommandBatchCursorTest { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("test", "test"); + private static final BsonInt64 CURSOR_ID = new BsonInt64(1); + private static final BsonDocument COMMAND_CURSOR_DOCUMENT = new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.getFullName())) + .append("id", CURSOR_ID) + .append("firstBatch", new BsonArrayWrapper<>(new BsonArray()))); + + private static final Decoder DOCUMENT_CODEC = new DocumentCodec(); + + + private AsyncConnection mockConnection; + private ConnectionDescription mockDescription; + private AsyncConnectionSource connectionSource; + private OperationContext operationContext; + private TimeoutContext timeoutContext; + private ServerDescription serverDescription; + + @BeforeEach + void setUp() { + ServerVersion serverVersion = new ServerVersion(3, 6); + + mockConnection = mock(AsyncConnection.class, "connection"); + mockDescription = mock(ConnectionDescription.class); + when(mockDescription.getMaxWireVersion()).thenReturn(getMaxWireVersionForServerVersion(serverVersion.getVersionList())); + when(mockDescription.getServerType()).thenReturn(ServerType.LOAD_BALANCER); + when(mockConnection.getDescription()).thenReturn(mockDescription); + when(mockConnection.retain()).thenReturn(mockConnection); + + connectionSource = mock(AsyncConnectionSource.class); + operationContext = mock(OperationContext.class); + timeoutContext = mock(TimeoutContext.class); + serverDescription = mock(ServerDescription.class); + when(operationContext.getTimeoutContext()).thenReturn(timeoutContext); + when(connectionSource.getOperationContext()).thenReturn(operationContext); + doAnswer(invocation -> { + SingleResultCallback callback = invocation.getArgument(0); + callback.onResult(mockConnection, null); + return null; + }).when(connectionSource).getConnection(any()); + 
when(connectionSource.getServerDescription()).thenReturn(serverDescription); + } + + + @Test + void shouldSkipKillsCursorsCommandWhenNetworkErrorOccurs() { + //given + doAnswer(invocation -> { + SingleResultCallback argument = invocation.getArgument(6); + argument.onResult(null, new MongoSocketException("test", new ServerAddress())); + return null; + }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(); + + //when + commandBatchCursor.next((result, t) -> { + Assertions.assertNull(result); + Assertions.assertNotNull(t); + Assertions.assertEquals(MongoSocketException.class, t.getClass()); + }); + + //then + commandBatchCursor.close(); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + } + + + @Test + void shouldNotSkipKillsCursorsCommandWhenTimeoutExceptionDoesNotHaveNetworkErrorCause() { + //given + doAnswer(invocation -> { + SingleResultCallback argument = invocation.getArgument(6); + argument.onResult(null, new MongoOperationTimeoutException("test")); + return null; + }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + when(timeoutContext.hasTimeoutMS()).thenReturn(true); + + AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(); + + //when + commandBatchCursor.next((result, t) -> { + Assertions.assertNull(result); + Assertions.assertNotNull(t); + Assertions.assertEquals(MongoOperationTimeoutException.class, t.getClass()); + }); + + commandBatchCursor.close(); + + + //then + verify(mockConnection, times(2)).commandAsync(any(), + any(), any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any(), any()); + } + + @Test + void shouldSkipKillsCursorsCommandWhenTimeoutExceptionHaveNetworkErrorCause() { + //given + doAnswer(invocation -> { + SingleResultCallback argument = invocation.getArgument(6); + argument.onResult(null, new MongoOperationTimeoutException("test", new MongoSocketException("test", new ServerAddress()))); + return null; + }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + when(timeoutContext.hasTimeoutMS()).thenReturn(true); + + AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(); + + //when + commandBatchCursor.next((result, t) -> { + Assertions.assertNull(result); + Assertions.assertNotNull(t); + Assertions.assertEquals(MongoOperationTimeoutException.class, t.getClass()); + }); + + commandBatchCursor.close(); + + //then + verify(mockConnection, times(1)).commandAsync(any(), + any(), any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any(), any()); + verify(mockConnection, never()).commandAsync(eq(NAMESPACE.getDatabaseName()), + 
argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any(), any()); + } + + + private AsyncCommandBatchCursor createBatchCursor() { + return new AsyncCommandBatchCursor( + TimeoutMode.CURSOR_LIFETIME, + COMMAND_CURSOR_DOCUMENT, + 0, + 0, + DOCUMENT_CODEC, + null, + connectionSource, + mockConnection); + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy index 129289bfbba..34187b34e62 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy @@ -52,6 +52,7 @@ import org.bson.codecs.DocumentCodec import org.bson.codecs.ValueCodecProvider import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getCluster import static com.mongodb.ClusterFixture.isStandalone @@ -60,7 +61,6 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.client.model.changestream.ChangeStreamDocument.createCodec import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion -import static java.util.concurrent.TimeUnit.MILLISECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders @IgnoreIf({ !(serverVersionAtLeast(3, 6) && !isStandalone()) }) @@ -68,32 +68,29 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should have the correct defaults'() { when: - ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), FullDocument.DEFAULT, - FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) + ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), + FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) then: operation.getBatchSize() == null operation.getCollation() == null operation.getFullDocument() == FullDocument.DEFAULT - operation.getMaxAwaitTime(MILLISECONDS) == 0 operation.getPipeline() == [] operation.getStartAtOperationTime() == null } def 'should set optional values correctly'() { when: - ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), FullDocument.UPDATE_LOOKUP, - FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) + ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) .batchSize(5) .collation(defaultCollation) - .maxAwaitTime(15, MILLISECONDS) .startAtOperationTime(new BsonTimestamp(99)) then: operation.getBatchSize() == 5 operation.getCollation() == defaultCollation operation.getFullDocument() == FullDocument.UPDATE_LOOKUP - operation.getMaxAwaitTime(MILLISECONDS) == 15 operation.getStartAtOperationTime() == new BsonTimestamp(99) } @@ -115,10 +112,9 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio .append('firstBatch', new BsonArrayWrapper([]))) def operation = new ChangeStreamOperation(namespace, FullDocument.DEFAULT, - FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel) + FullDocumentBeforeChange.DEFAULT, 
pipeline, new DocumentCodec(), changeStreamLevel as ChangeStreamLevel) .batchSize(5) .collation(defaultCollation) - .maxAwaitTime(15, MILLISECONDS) .startAtOperationTime(new BsonTimestamp()) def expectedCommand = new BsonDocument('aggregate', aggregate) @@ -390,8 +386,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "rename"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, - FullDocumentBeforeChange.DEFAULT, pipeline, + def operation = new ChangeStreamOperation(helper.getNamespace(), + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) def newNamespace = new MongoNamespace('JavaDriverTest', 'newCollectionName') helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -625,8 +621,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should support hasNext on the sync API'() { given: def helper = getHelper() - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange - .DEFAULT, [], CODEC) + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) when: def cursor = execute(operation, false) @@ -642,15 +638,16 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should set the startAtOperationTime on the sync cursor'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + getOperationTime() >> new BsonTimestamp() + }) def changeStream def binding = Stub(ReadBinding) { - getSessionContext() >> Stub(SessionContext) { - getReadConcern() >> ReadConcern.DEFAULT - getOperationTime() >> new BsonTimestamp() - } - getServerApi() >> null + getOperationContext() >> operationContext getReadConnectionSource() >> Stub(ConnectionSource) { - getServerApi() >> null + getOperationContext() >> operationContext getConnection() >> Stub(Connection) { command(*_) >> { changeStream = getChangeStream(it[1]) @@ -666,7 +663,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio } when: 'set resumeAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .resumeAfter(new BsonDocument()) .execute(binding) @@ -675,7 +673,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio !changeStream.containsKey('startAtOperationTime') when: 'set startAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAfter(new BsonDocument()) .execute(binding) @@ -685,7 +684,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio when: 'set startAtOperationTime' def startAtTime = new BsonTimestamp(42) - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, 
+ FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAtOperationTime(startAtTime) .execute(binding) @@ -695,16 +695,17 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should set the startAtOperationTime on the async cursor'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + getOperationTime() >> new BsonTimestamp() + }) def changeStream def binding = Stub(AsyncReadBinding) { - getServerApi() >> null - getSessionContext() >> Stub(SessionContext) { - getReadConcern() >> ReadConcern.DEFAULT - getOperationTime() >> new BsonTimestamp() - } + getOperationContext() >> operationContext getReadConnectionSource(_) >> { it.last().onResult(Stub(AsyncConnectionSource) { - getServerApi() >> null + getOperationContext() >> operationContext getConnection(_) >> { it.last().onResult(Stub(AsyncConnection) { commandAsync(*_) >> { @@ -723,7 +724,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio } when: 'set resumeAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .resumeAfter(new BsonDocument()) .executeAsync(binding, Stub(SingleResultCallback)) @@ -732,7 +734,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio !changeStream.containsKey('startAtOperationTime') when: 'set startAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAfter(new BsonDocument()) .executeAsync(binding, Stub(SingleResultCallback)) @@ -742,7 +745,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio when: 'set startAtOperationTime' def startAtTime = new BsonTimestamp(42) - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAtOperationTime(startAtTime) .executeAsync(binding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java index 30a74443633..7b9fd7b4e57 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java @@ -16,11 +16,11 @@ package com.mongodb.internal.operation; - import com.mongodb.MongoCursorNotFoundException; import com.mongodb.MongoQueryException; import com.mongodb.ReadPreference; import com.mongodb.ServerCursor; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.OperationTest; import com.mongodb.internal.binding.ConnectionSource; @@ -104,7 +104,7 @@ void cleanup() { @DisplayName("server cursor should not be null") void theServerCursorShouldNotBeNull() { BsonDocument commandResult = executeFindCommand(2); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = 
new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursor.getServerCursor()); @@ -114,7 +114,7 @@ void theServerCursorShouldNotBeNull() { @DisplayName("test server address should not be null") void theServerAddressShouldNotNull() { BsonDocument commandResult = executeFindCommand(); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursor.getServerAddress()); @@ -124,7 +124,7 @@ void theServerAddressShouldNotNull() { @DisplayName("should get Exceptions for operations on the cursor after closing") void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() { BsonDocument commandResult = executeFindCommand(); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); cursor.close(); @@ -139,7 +139,7 @@ void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() { @DisplayName("should throw an Exception when going off the end") void shouldThrowAnExceptionWhenGoingOffTheEnd() { BsonDocument commandResult = executeFindCommand(1); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); cursor.next(); @@ -151,7 +151,7 @@ void shouldThrowAnExceptionWhenGoingOffTheEnd() { @DisplayName("test cursor remove") void testCursorRemove() { BsonDocument commandResult = executeFindCommand(); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertThrows(UnsupportedOperationException.class, () -> cursor.remove()); @@ -161,7 +161,7 @@ void testCursorRemove() { @DisplayName("test normal exhaustion") void testNormalExhaustion() { BsonDocument commandResult = executeFindCommand(); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(10, cursorFlatten().size()); @@ -172,7 +172,7 @@ void testNormalExhaustion() { @DisplayName("test limit exhaustion") void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) { BsonDocument commandResult = executeFindCommand(limit, batchSize); - cursor = new CommandBatchCursor<>(commandResult, batchSize, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, batchSize, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(expectedTotal, cursorFlatten().size()); @@ -191,7 +191,7 @@ void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, fi BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData); - cursor = new CommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER, null, connectionSource, connection); assertTrue(cursor.hasNext()); @@ -214,10 
+214,9 @@ void testTryNextWithTailable() { BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); - List nextBatch = cursor.tryNext(); assertNotNull(nextBatch); assertEquals(1, nextBatch.get(0).get("_id")); @@ -241,7 +240,7 @@ void hasNextShouldThrowWhenCursorIsClosedInAnotherThread() throws InterruptedExc BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertTrue(cursor.hasNext()); @@ -268,10 +267,9 @@ void testMaxTimeMS() { long maxTimeMS = 500; BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); - cursor = new CommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER, null, connectionSource, connection); - List nextBatch = cursor.tryNext(); assertNotNull(nextBatch); @@ -293,7 +291,7 @@ void testTailableInterrupt() throws InterruptedException { BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); CountDownLatch latch = new CountDownLatch(1); @@ -326,7 +324,7 @@ void testTailableInterrupt() throws InterruptedException { void shouldKillCursorIfLimitIsReachedOnInitialQuery() { assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 10); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursor.next()); @@ -339,7 +337,7 @@ void shouldKillCursorIfLimitIsReachedOnInitialQuery() { void shouldKillCursorIfLimitIsReachedOnGetMore() { assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 3); - cursor = new CommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, null, connectionSource, connection); ServerCursor serverCursor = cursor.getServerCursor(); @@ -358,7 +356,7 @@ void shouldKillCursorIfLimitIsReachedOnGetMore() { void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 10); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNull(cursor.getServerCursor()); @@ -371,7 +369,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { 
assumeFalse(isSharded()); BsonDocument commandResult = executeFindCommand(5, 3); - cursor = new CommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursor.next()); @@ -384,7 +382,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { @DisplayName("test limit with get more") void testLimitWithGetMore() { BsonDocument commandResult = executeFindCommand(5, 2); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertNotNull(cursor.next()); @@ -405,7 +403,7 @@ void testLimitWithLargeDocuments() { ); BsonDocument commandResult = executeFindCommand(300, 0); - cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(300, cursorFlatten().size()); @@ -415,7 +413,7 @@ void testLimitWithLargeDocuments() { @DisplayName("should respect batch size") void shouldRespectBatchSize() { BsonDocument commandResult = executeFindCommand(2); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(2, cursor.getBatchSize()); @@ -432,7 +430,7 @@ void shouldRespectBatchSize() { @DisplayName("should throw cursor not found exception") void shouldThrowCursorNotFoundException() { BsonDocument commandResult = executeFindCommand(2); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); ServerCursor serverCursor = cursor.getServerCursor(); @@ -441,7 +439,7 @@ void shouldThrowCursorNotFoundException() { localConnection.command(getNamespace().getDatabaseName(), new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource); + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext()); localConnection.release(); cursor.next(); @@ -455,7 +453,7 @@ void shouldThrowCursorNotFoundException() { @DisplayName("should report available documents") void shouldReportAvailableDocuments() { BsonDocument commandResult = executeFindCommand(3); - cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, null, connectionSource, connection); assertEquals(3, cursor.available()); @@ -533,7 +531,7 @@ private BsonDocument executeFindCommand(final BsonDocument filter, final int lim BsonDocument results = connection.command(getDatabaseName(), findCommand, NO_OP_FIELD_NAME_VALIDATOR, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), - connectionSource); + connectionSource.getOperationContext()); assertNotNull(results); return results; diff --git 
a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy index 3d99ac477a4..a9f74ca50b3 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy @@ -16,102 +16,53 @@ package com.mongodb.internal.operation -import util.spock.annotations.Slow -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.OperationFunctionalSpecification import org.bson.BsonBinary import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.BsonString import org.bson.codecs.BsonDocumentCodec -import spock.lang.IgnoreIf - -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.executeAsync -import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.isSharded +import util.spock.annotations.Slow class CommandOperationSpecification extends OperationFunctionalSpecification { def 'should execute read command'() { given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())), - new BsonDocumentCodec()) + def operation = new CommandReadOperation(getNamespace().databaseName, + new BsonDocument('count', new BsonString(getCollectionName())), + new BsonDocumentCodec()) when: - def result = commandOperation.execute(getBinding()) + def result = execute(operation, async) then: result.getNumber('n').intValue() == 0 - } - - def 'should execute read command asynchronously'() { - given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())), - new BsonDocumentCodec()) - when: - def result = executeAsync(commandOperation) - then: - result.getNumber('n').intValue() == 0 + where: + async << [true, false] } + @Slow def 'should execute command larger than 16MB'() { - when: - def result = new CommandReadOperation<>(getNamespace().databaseName, - new BsonDocument('findAndModify', new BsonString(getNamespace().fullName)) - .append('query', new BsonDocument('_id', new BsonInt32(42))) - .append('update', - new BsonDocument('_id', new BsonInt32(42)) - .append('b', new BsonBinary( - new byte[16 * 1024 * 1024 - 30]))), - new BsonDocumentCodec()) - .execute(getBinding()) - - then: - result.containsKey('value') - } - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())) - .append('maxTimeMS', new BsonInt32(1)), - new BsonDocumentCodec()) - enableMaxTimeFailPoint() + def operation = new CommandReadOperation<>(getNamespace().databaseName, + new BsonDocument('findAndModify', new BsonString(getNamespace().fullName)) + .append('query', new BsonDocument('_id', new BsonInt32(42))) + .append('update', + new BsonDocument('_id', new BsonInt32(42)) + .append('b', new BsonBinary( + new byte[16 * 1024 * 1024 - 30]))), + new BsonDocumentCodec()) when: - commandOperation.execute(getBinding()) + def result = execute(operation, async) then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } 
- - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())) - .append('maxTimeMS', new BsonInt32(1)), - new BsonDocumentCodec()) - enableMaxTimeFailPoint() - - when: - executeAsync(commandOperation) - - then: - thrown(MongoExecutionTimeoutException) + result.containsKey('value') - cleanup: - disableMaxTimeFailPoint() + where: + async << [true, false] } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy index c308e115ca8..26d7d11bc6e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.MongoException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadConcern @@ -45,16 +44,13 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS class CountDocumentsOperationSpecification extends OperationFunctionalSpecification { @@ -77,20 +73,18 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat then: operation.getFilter() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getHint() == null operation.getLimit() == 0 operation.getSkip() == 0 } - def 'should set optional values correctly'(){ + def 'should set optional values correctly'() { given: def filter = new BsonDocument('filter', new BsonInt32(1)) def hint = new BsonString('hint') when: CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) - .maxTime(10, MILLISECONDS) .filter(filter) .hint(hint) .limit(20) @@ -98,7 +92,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat then: operation.getFilter() == filter - operation.getMaxTime(MILLISECONDS) == 10 operation.getHint() == hint operation.getLimit() == 20 operation.getSkip() == 30 @@ -135,24 +128,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat async << [true, false] } - def 'should throw execution timeout exception from execute'() { - given: - def operation = new CountDocumentsOperation(getNamespace()).maxTime(1, SECONDS) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, 
false] - } - def 'should use limit with the count'() { when: def operation = new CountDocumentsOperation(getNamespace()).limit(1) @@ -179,7 +154,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use hint with the count'() { given: def indexDefinition = new BsonDocument('y', new BsonInt32(1)) - new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)]) + new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)], null) .execute(getBinding()) def operation = new CountDocumentsOperation(getNamespace()).hint(indexDefinition) @@ -243,11 +218,11 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) when: - operation.filter(filter) + operation = new CountDocumentsOperation(helper.namespace) + .filter(filter) .limit(20) .skip(30) .hint(hint) - .maxTime(10, MILLISECONDS) .collation(defaultCollation) expectedCommand = expectedCommand @@ -255,7 +230,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat new BsonDocument('$skip', new BsonInt64(30)), new BsonDocument('$limit', new BsonInt64(20)), pipeline.last()])) - .append('maxTimeMS', new BsonInt32(10)) .append('collation', defaultCollation.asDocument()) .append('hint', hint) @@ -270,7 +244,8 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should support collation'() { given: getCollectionHelper().insertDocuments(BsonDocument.parse('{str: "foo"}')) - def operation = new CountDocumentsOperation(namespace).filter(BsonDocument.parse('{str: "FOO"}')) + def operation = new CountDocumentsOperation(namespace) + .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) when: @@ -285,16 +260,16 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should add read concern to command'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(ReadBinding) def source = Stub(ConnectionSource) def connection = Mock(Connection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.readConnectionSource >> source - binding.sessionContext >> sessionContext source.connection >> connection source.retain() >> source - source.getServerApi() >> null + source.operationContext >> operationContext def pipeline = new BsonArray([BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')]) def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) .append('pipeline', pipeline) @@ -309,8 +284,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.command(_, commandDocument, _, _, _, binding) >> - helper.cursorResult + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> helper.cursorResult 1 * connection.release() where: @@ -326,15 +300,16 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should add read concern to command asynchronously'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(AsyncReadBinding) def 
source = Stub(AsyncConnectionSource) def connection = Mock(AsyncConnection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } - binding.sessionContext >> sessionContext source.getConnection(_) >> { it[0].onResult(connection, null) } source.retain() >> source + source.operationContext >> operationContext def pipeline = new BsonArray([BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')]) def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) .append('pipeline', pipeline) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy index c327721bbd5..cddb1925b64 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy @@ -38,7 +38,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should have the correct defaults'() { when: - CreateCollectionOperation operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + CreateCollectionOperation operation = createOperation() then: !operation.isCapped() @@ -61,7 +61,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def validator = BsonDocument.parse('{ level: { $gte : 10 }}') when: - CreateCollectionOperation operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + CreateCollectionOperation operation = createOperation() .autoIndex(false) .capped(true) .sizeInBytes(1000) @@ -91,7 +91,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific assert !collectionNameExists(getCollectionName()) when: - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() execute(operation, async) then: @@ -108,16 +108,16 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific if (serverVersionLessThan(4, 2)) { storageEngineOptions.append('mmapv1', new BsonDocument()) } - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .storageEngineOptions(storageEngineOptions) when: execute(operation, async) then: - new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find { - it -> it.getString('name').value == getCollectionName() - }.getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions + new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()) + .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() } + .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions where: async << [true, false] @@ -130,17 +130,16 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific if (serverVersionLessThan(4, 2)) { storageEngineOptions.append('mmapv1', new BsonDocument()) } - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() 
.storageEngineOptions(storageEngineOptions) when: execute(operation, async) then: - new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find { - it -> it.getString('name').value == getCollectionName() - }.getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions - + new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()) + .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() } + .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions where: async << [true, false] } @@ -148,7 +147,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should create capped collection'() { given: assert !collectionNameExists(getCollectionName()) - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .capped(true) .maxDocuments(100) .sizeInBytes(40 * 1024) @@ -177,7 +176,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should create collection in respect to the autoIndex option'() { given: assert !collectionNameExists(getCollectionName()) - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .autoIndex(autoIndex) when: @@ -199,7 +198,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific given: assert !collectionNameExists(getCollectionName()) def indexOptionDefaults = BsonDocument.parse('{ storageEngine: { wiredTiger : {} }}') - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .indexOptionDefaults(indexOptionDefaults) when: @@ -218,7 +217,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific given: assert !collectionNameExists(getCollectionName()) def validator = BsonDocument.parse('{ level: { $gte : 10 }}') - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .validator(validator) .validationLevel(ValidationLevel.MODERATE) .validationAction(ValidationAction.ERROR) @@ -247,7 +246,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should throw on write concern error'() { given: assert !collectionNameExists(getCollectionName()) - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName(), new WriteConcern(5)) + def operation = createOperation(new WriteConcern(5)) when: execute(operation, async) @@ -264,7 +263,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should be able to create a collection with a collation'() { given: - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()).collation(defaultCollation) + def operation = createOperation().collation(defaultCollation) when: execute(operation, async) @@ -287,6 +286,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific getCollectionInfo(collectionName) != null } + BsonDocument storageStats() { if (serverVersionLessThan(6, 2)) { return new CommandReadOperation<>(getDatabaseName(), @@ -294,6 +294,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific new BsonDocumentCodec()).execute(getBinding()) } BatchCursor cursor = new AggregateOperation( + 
getNamespace(), singletonList(new BsonDocument('$collStats', new BsonDocument('storageStats', new BsonDocument()))), new BsonDocumentCodec()).execute(getBinding()) @@ -303,4 +304,12 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific cursor.close() } } + + def createOperation() { + createOperation(null) + } + + def createOperation(WriteConcern writeConcern) { + new CreateCollectionOperation(getDatabaseName(), getCollectionName(), writeConcern) + } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy index 3f0f1938bb6..389f4388b54 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy @@ -20,7 +20,6 @@ import com.mongodb.CreateIndexCommitQuorum import com.mongodb.DuplicateKeyException import com.mongodb.MongoClientException import com.mongodb.MongoCommandException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoWriteConcernException import com.mongodb.OperationFunctionalSpecification import com.mongodb.WriteConcern @@ -35,11 +34,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet -import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan import static java.util.concurrent.TimeUnit.SECONDS @@ -53,14 +49,13 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should get index names'() { when: - def createIndexOperation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field1', new BsonInt32(1))), - new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))), - new IndexRequest(new BsonDocument('field3', new BsonInt32(1)) - .append('field4', new BsonInt32(-1))), - new IndexRequest(new BsonDocument('field5', new BsonInt32(-1))) - .name('customName') - ]) + def createIndexOperation = createOperation([new IndexRequest(new BsonDocument('field1', new BsonInt32(1))), + new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))), + new IndexRequest(new BsonDocument('field3', new BsonInt32(1)) + .append('field4', new BsonInt32(-1))), + new IndexRequest(new BsonDocument('field5', new BsonInt32(-1))) + .name('customName') + ]) then: createIndexOperation.indexNames == ['field1_1', 'field2_-1', 'field3_1_field4_-1', 'customName'] } @@ -68,7 +63,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a single index'() { given: def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) when: execute(operation, async) @@ -80,32 +75,11 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati async << [true, false] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from 
execute'() { - given: - def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]).maxTime(30, SECONDS) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - @IgnoreIf({ serverVersionAtLeast(4, 4) }) def 'should throw exception if commit quorum is set where server < 4.4'() { given: def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) .commitQuorum(CreateIndexCommitQuorum.MAJORITY) when: @@ -124,7 +98,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def keys = new BsonDocument('field', new BsonInt32(1)) when: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) .commitQuorum(quorum) then: @@ -144,7 +118,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a single index with a BsonInt64'() { given: def keys = new BsonDocument('field', new BsonInt64(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) when: execute(operation, async) @@ -160,8 +134,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati given: def keysForFirstIndex = new BsonDocument('field', new BsonInt32(1)) def keysForSecondIndex = new BsonDocument('field2', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keysForFirstIndex), - new IndexRequest(keysForSecondIndex)]) + def operation = createOperation([new IndexRequest(keysForFirstIndex), + new IndexRequest(keysForSecondIndex)]) when: execute(operation, async) @@ -176,7 +150,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a single index on a nested field'() { given: def keys = new BsonDocument('x.y', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) when: execute(operation, async) @@ -191,8 +165,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to handle duplicate key errors when indexing'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)]) + def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)]) when: execute(operation, async) @@ -208,8 +181,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should drop duplicates'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true).dropDups(true)]) + def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true).dropDups(true)]) when: execute(operation, async) @@ -223,7 +195,7 @@ class CreateIndexesOperationSpecification 
extends OperationFunctionalSpecificati def 'should throw when trying to build an invalid index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument())]) + def operation = createOperation([new IndexRequest(new BsonDocument())]) when: execute(operation, async) @@ -237,8 +209,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a unique index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -248,8 +219,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)]) execute(operation, async) then: @@ -261,7 +231,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a sparse index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -271,8 +241,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)]) execute(operation, async) then: @@ -284,8 +253,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a TTL indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -295,8 +263,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)]) execute(operation, async) then: @@ -309,8 +276,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a 2d indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2d')))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d')))]) when: execute(operation, async) @@ -320,8 +286,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new 
BsonString('2d'))).bits(2).min(1.0).max(2.0)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d'))).bits(2).min(1.0).max(2.0)]) execute(operation, async) then: @@ -336,8 +301,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a 2dSphereIndex'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))]) when: execute(operation, async) @@ -351,8 +315,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a 2dSphereIndex with version 1'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)]) when: execute(operation, async) @@ -367,11 +330,10 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a textIndex'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('text'))) - .defaultLanguage('es') - .languageOverride('language') - .weights(new BsonDocument('field', new BsonInt32(100)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))) + .defaultLanguage('es') + .languageOverride('language') + .weights(new BsonDocument('field', new BsonInt32(100)))]) when: execute(operation, async) @@ -388,8 +350,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a textIndexVersion'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('text')))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text')))]) when: execute(operation, async) @@ -403,8 +364,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a textIndexVersion with version 1'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)]) when: execute(operation, async) @@ -420,9 +380,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should pass through storage engine options'() { given: def storageEngineOptions = new Document('wiredTiger', new Document('configString', 'block_compressor=zlib')) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('a', new BsonInt32(1))) - .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))]) + def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))]) when: execute(operation, async) @@ -438,9 +397,9 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a partially filtered index'() { 
given: def partialFilterExpression = new Document('a', new Document('$gte', 10)) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))) - .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression, new DocumentCodec()))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))) + .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression, + new DocumentCodec()))]) when: execute(operation, async) @@ -473,8 +432,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should be able to create an index with collation'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)]) + def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)]) when: execute(operation, async) @@ -491,9 +449,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to create wildcard indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('$**', new BsonInt32(1))), - new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))), + new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))]) when: execute(operation, async) @@ -509,9 +466,9 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to create wildcard index with projection'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('$**', new BsonInt32(1))) - .wildcardProjection(new BsonDocument('a', BsonBoolean.TRUE).append('_id', BsonBoolean.FALSE))]) + def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))) + .wildcardProjection(new BsonDocument('a', BsonBoolean.TRUE).append('_id', + BsonBoolean.FALSE))]) when: execute(operation, async) @@ -527,7 +484,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(4, 4) }) def 'should be able to set hidden index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -537,8 +494,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)]) execute(operation, async) then: @@ -571,4 +527,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati getUserCreatedIndexes()*.get(keyname).findAll { it != null } } + def createOperation(final List requests) { + new CreateIndexesOperation(getNamespace(), requests, null) + } + } diff --git 
a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy index 87fc13aaa31..52ad4334493 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy @@ -51,7 +51,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification getCollectionHelper().insertDocuments([trueXDocument, falseXDocument]) def pipeline = [new BsonDocument('$match', trueXDocument)] - def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline, WriteConcern.ACKNOWLEDGED) + def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline, + WriteConcern.ACKNOWLEDGED) when: execute(operation, async) @@ -79,7 +80,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification assert !collectionNameExists(viewOn) assert !collectionNameExists(viewName) - def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [], WriteConcern.ACKNOWLEDGED) + def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [], + WriteConcern.ACKNOWLEDGED) .collation(defaultCollation) when: @@ -120,7 +122,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) assert !collectionNameExists(viewName) - def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], new WriteConcern(5)) + def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], + new WriteConcern(5)) when: execute(operation, async) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy index 40f707ccf1b..587e05e1d0c 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadConcern @@ -38,7 +37,6 @@ import com.mongodb.internal.session.SessionContext import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonInt32 -import org.bson.BsonInt64 import org.bson.BsonInvalidOperationException import org.bson.BsonString import org.bson.BsonTimestamp @@ -53,15 +51,12 @@ import org.bson.codecs.ValueCodecProvider import org.bson.types.ObjectId import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static 
java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders class DistinctOperationSpecification extends OperationFunctionalSpecification { @@ -80,7 +75,6 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { then: operation.getFilter() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getCollation() == null } @@ -90,13 +84,11 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { when: DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) - .maxTime(10, MILLISECONDS) .filter(filter) .collation(defaultCollation) then: operation.getFilter() == filter - operation.getMaxTime(MILLISECONDS) == 10 operation.getCollation() == defaultCollation } @@ -186,24 +178,6 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { async << [true, false] } - def 'should throw execution timeout exception from execute'() { - given: - def operation = new DistinctOperation(getNamespace(), 'name', stringDecoder).maxTime(1, SECONDS) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should use the ReadBindings readPreference to set secondaryOk'() { when: def operation = new DistinctOperation(helper.namespace, 'name', helper.decoder) @@ -219,13 +193,11 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { when: def operation = new DistinctOperation(helper.namespace, 'name', new BsonDocumentCodec()) .filter(new BsonDocument('a', BsonBoolean.TRUE)) - .maxTime(10, MILLISECONDS) .collation(defaultCollation) def expectedCommand = new BsonDocument('distinct', new BsonString(helper.namespace.getCollectionName())) .append('key', new BsonString('name')) .append('query', operation.getFilter()) - .append('maxTimeMS', new BsonInt64(operation.getMaxTime(MILLISECONDS))) .append('collation', defaultCollation.asDocument()) then: @@ -240,7 +212,8 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { given: def document = Document.parse('{str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new DistinctOperation(namespace, 'str', stringDecoder).filter(BsonDocument.parse('{str: "FOO"}}')) + def operation = new DistinctOperation(namespace, 'str', stringDecoder) + .filter(BsonDocument.parse('{str: "FOO"}}')) .collation(caseInsensitiveCollation) when: @@ -255,16 +228,16 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should add read concern to command'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(ReadBinding) def source = Stub(ConnectionSource) def connection = Mock(Connection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.readConnectionSource >> source - binding.sessionContext >> sessionContext source.connection >> connection source.retain() >> source - source.getServerApi() >> null + source.operationContext >> operationContext def commandDocument = new BsonDocument('distinct', new BsonString(getCollectionName())) .append('key', new BsonString('str')) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) @@ -277,7 +250,7 @@ class 
DistinctOperationSpecification extends OperationFunctionalSpecification { then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.command(_, commandDocument, _, _, _, _) >> + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> new BsonDocument('values', new BsonArrayWrapper([])) 1 * connection.release() @@ -294,14 +267,14 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should add read concern to command asynchronously'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(AsyncReadBinding) def source = Stub(AsyncConnectionSource) def connection = Mock(AsyncConnection) - binding.serverApi >> null binding.readPreference >> ReadPreference.primary() binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } - binding.sessionContext >> sessionContext - source.serverApi >> null + binding.operationContext >> operationContext + source.operationContext >> operationContext source.getConnection(_) >> { it[0].onResult(connection, null) } source.retain() >> source def commandDocument = new BsonDocument('distinct', new BsonString(getCollectionName())) @@ -316,7 +289,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.commandAsync(_, commandDocument, _, _, _, *_) >> { + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, *_) >> { it.last().onResult(new BsonDocument('values', new BsonArrayWrapper([])), null) } 1 * connection.release() diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy index 0c293ed58b0..67124fecf30 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy @@ -37,7 +37,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat assert collectionNameExists(getCollectionName()) when: - new DropCollectionOperation(getNamespace()).execute(getBinding()) + new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED).execute(getBinding()) then: !collectionNameExists(getCollectionName()) @@ -50,7 +50,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat assert collectionNameExists(getCollectionName()) when: - executeAsync(new DropCollectionOperation(getNamespace())) + executeAsync(new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED)) then: !collectionNameExists(getCollectionName()) @@ -61,7 +61,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') when: - new DropCollectionOperation(namespace).execute(getBinding()) + new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) then: !collectionNameExists('nonExistingCollection') @@ -73,7 +73,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat def 
namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') when: - executeAsync(new DropCollectionOperation(namespace)) + executeAsync(new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED)) then: !collectionNameExists('nonExistingCollection') diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy index 1069dbfe2a6..61648c1daec 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy @@ -42,47 +42,28 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio assert databaseNameExists(databaseName) when: - new DropDatabaseOperation(databaseName).execute(getBinding()) + execute(new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED), async) then: !databaseNameExists(databaseName) - } - - - @IgnoreIf({ isSharded() }) - def 'should drop a database that exists asynchronously'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) - assert databaseNameExists(databaseName) - when: - executeAsync(new DropDatabaseOperation(databaseName)) - - then: - !databaseNameExists(databaseName) + where: + async << [true, false] } + def 'should not error when dropping a collection that does not exist'() { given: def dbName = 'nonExistingDatabase' when: - new DropDatabaseOperation(dbName).execute(getBinding()) + execute(new DropDatabaseOperation(dbName, WriteConcern.ACKNOWLEDGED), async) then: !databaseNameExists(dbName) - } - - - def 'should not error when dropping a collection that does not exist asynchronously'() { - given: - def dbName = 'nonExistingDatabase' - when: - executeAsync(new DropDatabaseOperation(dbName)) - - then: - !databaseNameExists(dbName) + where: + async << [true, false] } @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy index 029b2c8544b..a051231af7e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.MongoException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoWriteConcernException import com.mongodb.OperationFunctionalSpecification import com.mongodb.WriteConcern @@ -30,19 +29,15 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import spock.lang.Unroll -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet -import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionLessThan -import static java.util.concurrent.TimeUnit.SECONDS class DropIndexOperationSpecification extends OperationFunctionalSpecification { def 'should not error when dropping non-existent index 
on non-existent collection'() { when: - execute(new DropIndexOperation(getNamespace(), 'made_up_index_1'), async) + execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async) then: getIndexes().size() == 0 @@ -56,7 +51,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) when: - execute(new DropIndexOperation(getNamespace(), 'made_up_index_1'), async) + execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async) then: thrown(MongoException) @@ -70,7 +65,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) when: - execute(new DropIndexOperation(getNamespace(), 'theField_1'), async) + execute(new DropIndexOperation(getNamespace(), 'theField_1', null), async) List indexes = getIndexes() then: @@ -87,7 +82,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(keys) when: - execute(new DropIndexOperation(getNamespace(), keys), async) + execute(new DropIndexOperation(getNamespace(), keys, null), async) List indexes = getIndexes() then: @@ -105,35 +100,14 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { ].combinations() } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - def keys = new BsonDocument('theField', new BsonInt32(1)) - collectionHelper.createIndex(keys) - def operation = new DropIndexOperation(getNamespace(), keys).maxTime(30, SECONDS) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should drop existing index by key when using BsonInt64'() { given: def keys = new BsonDocument('theField', new BsonInt32(1)) collectionHelper.createIndex(keys) when: - execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new BsonInt64(1))), async) + execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new BsonInt64(1)), null), + async) List indexes = getIndexes() then: @@ -150,7 +124,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(new BsonDocument('theOtherField', new BsonInt32(1))) when: - execute(new DropIndexOperation(getNamespace(), '*'), async) + execute(new DropIndexOperation(getNamespace(), '*', null), async) List indexes = getIndexes() then: diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy index aad74b1881f..ccc9614d1fb 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy @@ -34,8 +34,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import java.util.concurrent.TimeUnit - import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint 
@@ -63,7 +61,6 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.MILLISECONDS) == 0 operation.getCollation() == null } @@ -78,14 +75,12 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati .filter(filter) .sort(sort) .projection(projection) - .maxTime(10, TimeUnit.MILLISECONDS) .collation(defaultCollation) then: operation.getFilter() == filter operation.getSort() == sort operation.getProjection() == projection - operation.getMaxTime(TimeUnit.MILLISECONDS) == 10 operation.getCollation() == defaultCollation } @@ -118,8 +113,8 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getWorkerCollectionHelper().insertDocuments(new WorkerCodec(), pete, sam) when: - FindAndDeleteOperation operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, - workerCodec).filter(new BsonDocument('name', new BsonString('Pete'))) + FindAndDeleteOperation operation = new FindAndDeleteOperation(getNamespace(), + ACKNOWLEDGED, false, workerCodec).filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) then: @@ -220,12 +215,10 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati operation.filter(filter) .sort(sort) .projection(projection) - .maxTime(10, TimeUnit.MILLISECONDS) expectedCommand.append('query', filter) .append('sort', sort) .append('fields', projection) - .append('maxTimeMS', new BsonInt64(10)) operation.collation(defaultCollation) expectedCommand.append('collation', defaultCollation.asDocument()) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy index a4a0a48bd60..4c334fa0ea0 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy @@ -40,8 +40,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import java.util.concurrent.TimeUnit - import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -62,7 +60,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should have the correct defaults and passed values'() { when: def replacement = new BsonDocument('replace', new BsonInt32(1)) - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, + replacement) then: operation.getNamespace() == getNamespace() @@ -72,7 +71,6 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.SECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null } @@ -86,7 +84,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def operation = new 
FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonDocument('replace', new BsonInt32(1))).filter(filter).sort(sort).projection(projection) - .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true).returnOriginal(false) + .bypassDocumentValidation(true).upsert(true).returnOriginal(false) .collation(defaultCollation) then: @@ -94,7 +92,6 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat operation.getSort() == sort operation.getProjection() == projection operation.upsert == true - operation.getMaxTime(TimeUnit.SECONDS) == 1 operation.getBypassDocumentValidation() !operation.isReturnOriginal() operation.getCollation() == defaultCollation @@ -110,7 +107,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -144,8 +142,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new WorkerCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, - replacement).filter(new BsonDocument('name', new BsonString('Pete'))) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + workerCodec, replacement).filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) then: @@ -154,7 +152,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: replacement = new BsonDocumentWrapper(pete, workerCodec) - operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, replacement) + operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, + replacement) .filter(new BsonDocument('name', new BsonString('Jordan'))) .returnOriginal(false) returnedDocument = execute(operation, async) @@ -169,7 +168,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should return null if query fails to match'() { when: BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -183,7 +183,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should throw an exception if replacement contains update operators'() { given: def replacement = new BsonDocumentWrapper(['$inc': 1] as Document, documentCodec) - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, replacement) when: execute(operation, async) @@ -207,7 +208,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def replacement = new BsonDocument('level', new BsonInt32(9)) - def operation = new 
FindAndReplaceOperation(namespace, ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(namespace, ACKNOWLEDGED, false, + documentCodec, replacement) execute(operation, async) then: @@ -245,8 +247,9 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') when: - def operation = new FindAndReplaceOperation(getNamespace(), new WriteConcern(5, 1), - false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Pete'))) + def operation = new FindAndReplaceOperation(getNamespace(), + new WriteConcern(5, 1), false, documentCodec, jordan) + .filter(new BsonDocument('name', new BsonString('Pete'))) execute(operation, async) then: @@ -341,12 +344,10 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat .sort(sort) .projection(projection) .bypassDocumentValidation(true) - .maxTime(10, TimeUnit.MILLISECONDS) expectedCommand.append('query', filter) .append('sort', sort) .append('fields', projection) - .append('maxTimeMS', new BsonInt64(10)) operation.collation(defaultCollation) expectedCommand.append('collation', defaultCollation.asDocument()) @@ -376,7 +377,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, jordan) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) @@ -398,7 +400,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def replacement = BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, replacement) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', replacement) .append('txnNumber', new BsonInt64(0)) @@ -414,7 +417,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should throw original error when retrying and failing'() { given: def replacement = BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, replacement) def originalException = new MongoSocketException('Some failure', new ServerAddress()) when: @@ -443,7 +447,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def replacement = BsonDocument.parse('{str: "bar"}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, replacement) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git 
a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy index d6625cd4d88..821eacbee6e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy @@ -41,8 +41,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import java.util.concurrent.TimeUnit - import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -64,7 +62,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults and passed values'() { when: def update = new BsonDocument('update', new BsonInt32(1)) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) then: operation.getNamespace() == getNamespace() @@ -74,7 +73,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.SECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null } @@ -93,7 +91,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.SECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null } @@ -105,9 +102,12 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, - new BsonDocument('update', new BsonInt32(1))).filter(filter).sort(sort).projection(projection) - .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true) + def operation = new FindAndUpdateOperation(getNamespace(), + ACKNOWLEDGED, false, documentCodec, new BsonDocument('update', new BsonInt32(1))) + .filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true).upsert(true) .returnOriginal(false) .collation(defaultCollation) @@ -116,7 +116,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getSort() == sort operation.getProjection() == projection operation.upsert == true - operation.getMaxTime(TimeUnit.SECONDS) == 1 operation.getBypassDocumentValidation() !operation.isReturnOriginal() operation.getCollation() == defaultCollation @@ -130,10 +129,12 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, - new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1))))) - .filter(filter).sort(sort).projection(projection) - .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true) + def 
operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1))))) + .filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true).upsert(true) .returnOriginal(false) .collation(defaultCollation) @@ -142,7 +143,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getSort() == sort operation.getProjection() == projection operation.upsert == true - operation.getMaxTime(TimeUnit.SECONDS) == 1 operation.getBypassDocumentValidation() !operation.isReturnOriginal() operation.getCollation() == defaultCollation @@ -158,7 +158,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -169,7 +170,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) returnedDocument = execute(operation, async) @@ -223,7 +225,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + workerCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) @@ -234,7 +237,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, update) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + workerCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) returnedDocument = execute(operation, async) @@ -287,7 +291,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw an exception if update contains fields that are not update operators'() { given: def update = new BsonDocument('x', new BsonInt32(1)) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) when: execute(operation, async) @@ -333,7 +338,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('level', new BsonInt32(-1))) - def operation = new FindAndUpdateOperation(namespace, ACKNOWLEDGED, false, documentCodec, update) + def 
operation = new FindAndUpdateOperation(namespace, ACKNOWLEDGED, false, + documentCodec, update) execute(operation, async) then: @@ -368,7 +374,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) when: - def operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), + new WriteConcern(5, 1), false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) execute(operation, async) @@ -381,7 +388,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati ex.writeResult.upsertedId == null when: - operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update) + operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Bob'))) .upsert(true) execute(operation, async) @@ -410,7 +418,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati configureFailPoint(failPoint) def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -461,12 +470,10 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati .sort(sort) .projection(projection) .bypassDocumentValidation(true) - .maxTime(10, TimeUnit.MILLISECONDS) expectedCommand.append('query', filter) .append('sort', sort) .append('fields', projection) - .append('maxTimeMS', new BsonInt64(10)) operation.collation(defaultCollation) expectedCommand.append('collation', defaultCollation.asDocument()) @@ -496,7 +503,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) @@ -519,7 +527,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', update) .append('txnNumber', new BsonInt64(0)) @@ -535,7 +544,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw original error when retrying and failing'() { given: def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, 
documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) def originalException = new MongoSocketException('Some failure', new ServerAddress()) when: @@ -564,7 +574,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def update = BsonDocument.parse('{ $set: {str: "bar"}}') - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) @@ -586,7 +597,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(documentOne, documentTwo) def update = BsonDocument.parse('{ $set: {"y.$[i].b": 2}}') def arrayFilters = [BsonDocument.parse('{"i.b": 3}')] - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .returnOriginal(false) .arrayFilters(arrayFilters) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy index 3bd84accd6f..f70cac7b6ad 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.ClusterFixture -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.MongoQueryException import com.mongodb.OperationFunctionalSpecification @@ -31,7 +30,7 @@ import com.mongodb.connection.ClusterId import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ConnectionId import com.mongodb.connection.ServerId -import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.binding.AsyncClusterBinding import com.mongodb.internal.binding.AsyncConnectionSource import com.mongodb.internal.binding.AsyncReadBinding @@ -53,10 +52,10 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.executeSync +import static com.mongodb.ClusterFixture.getAsyncBinding import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.getCluster @@ -69,8 +68,6 @@ import static com.mongodb.CursorType.TailableAwait import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS 
import static org.junit.Assert.assertEquals class FindOperationSpecification extends OperationFunctionalSpecification { @@ -86,8 +83,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification { operation.getNamespace() == getNamespace() operation.getDecoder() == decoder operation.getFilter() == null - operation.getMaxTime(MILLISECONDS) == 0 - operation.getMaxAwaitTime(MILLISECONDS) == 0 operation.getHint() == null operation.getLimit() == 0 operation.getSkip() == 0 @@ -107,8 +102,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification { when: FindOperation operation = new FindOperation(getNamespace(), new DocumentCodec()) - .maxTime(10, SECONDS) - .maxAwaitTime(20, SECONDS) .filter(filter) .limit(20) .skip(30) @@ -123,8 +116,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification { then: operation.getFilter() == filter - operation.getMaxTime(MILLISECONDS) == 10000 - operation.getMaxAwaitTime(MILLISECONDS) == 20000 operation.getLimit() == 20 operation.getSkip() == 30 operation.getHint() == hint @@ -166,7 +157,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(getNamespace(), new DocumentCodec()).filter(new BsonDocument('_id', new BsonInt32(1)))] + [new FindOperation(getNamespace(), new DocumentCodec()) + .filter(new BsonDocument('_id', new BsonInt32(1)))] ].combinations() } @@ -186,7 +178,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(getNamespace(), new DocumentCodec()).sort(new BsonDocument('_id', new BsonInt32(1)))] + [new FindOperation(getNamespace(), new DocumentCodec()) + .sort(new BsonDocument('_id', new BsonInt32(1)))] ].combinations() } @@ -308,29 +301,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification { async << [true, false] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - [async, operation] << [ - [true, false], - [new FindOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS)] - ].combinations() - } - def '$max should limit items returned'() { given: (1..100).each { @@ -417,8 +387,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' def operation = new FindOperation(getNamespace(), new DocumentCodec()) .comment(new BsonString(expectedComment)) @@ -437,7 +407,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { } cleanup: - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new 
BsonDocumentCodec()) .execute(getBinding()) profileCollectionHelper.drop() @@ -468,10 +439,9 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: collectionHelper.insertDocuments(new DocumentCodec(), new Document()) def operation = new FindOperation(getNamespace(), new DocumentCodec()) - def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, null, - IgnorableRequestContext.INSTANCE) - def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, null, - IgnorableRequestContext.INSTANCE) + def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) + def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, + OPERATION_CONTEXT) when: def result = async ? executeAsync(operation, asyncBinding) : executeSync(operation, syncBinding) @@ -495,9 +465,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def hedgeOptions = isHedgeEnabled != null ? ReadPreferenceHedgeOptions.builder().enabled(isHedgeEnabled as boolean).build() : null def readPreference = ReadPreference.primaryPreferred().withHedgeOptions(hedgeOptions) - def syncBinding = new ClusterBinding(getCluster(), readPreference, ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE) - def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), readPreference, ReadConcern.DEFAULT, null, - IgnorableRequestContext.INSTANCE) + def syncBinding = new ClusterBinding(getCluster(), readPreference, ReadConcern.DEFAULT, OPERATION_CONTEXT) + def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), readPreference, ReadConcern.DEFAULT, OPERATION_CONTEXT) def cursor = async ? 
executeAsync(operation, asyncBinding) : executeSync(operation, syncBinding) def firstBatch = { if (async) { @@ -518,16 +487,16 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should add read concern to command'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(ReadBinding) def source = Stub(ConnectionSource) def connection = Mock(Connection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.readConnectionSource >> source - binding.sessionContext >> sessionContext source.connection >> connection source.retain() >> source - source.getServerApi() >> null + source.operationContext >> operationContext def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) @@ -539,7 +508,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.command(_, commandDocument, _, _, _, binding) >> + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) .append('ns', new BsonString(getNamespace().getFullName())) .append('firstBatch', new BsonArrayWrapper([]))) @@ -558,14 +527,14 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should add read concern to command asynchronously'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(AsyncReadBinding) def source = Stub(AsyncConnectionSource) def connection = Mock(AsyncConnection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } - binding.sessionContext >> sessionContext - source.serverApi >> null + source.operationContext >> operationContext source.getConnection(_) >> { it[0].onResult(connection, null) } source.retain() >> source def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) @@ -579,7 +548,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> { + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> { it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) .append('ns', new BsonString(getNamespace().getFullName())) .append('firstBatch', new BsonArrayWrapper([]))), null) @@ -599,16 +568,16 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should add allowDiskUse to command if the server version >= 3.2'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(ReadBinding) def source = Stub(ConnectionSource) def connection = Mock(Connection) binding.readPreference >> ReadPreference.primary() binding.readConnectionSource >> source - binding.serverApi >> null - binding.sessionContext >> sessionContext + 
binding.operationContext >> operationContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE)
         appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
@@ -620,7 +589,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification {

         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, binding) >>
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
                 new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                         .append('ns', new BsonString(getNamespace().getFullName()))
                         .append('firstBatch', new BsonArrayWrapper([])))
@@ -639,14 +608,14 @@ class FindOperationSpecification extends OperationFunctionalSpecification {

     def 'should add allowDiskUse to command if the server version >= 3.2 asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readPreference >> ReadPreference.primary()
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
-        source.serverApi >> null
+        source.operationContext >> operationContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
         def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE)
@@ -660,7 +629,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification {

         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> {
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> {
             it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                     .append('ns', new BsonString(getNamespace().getFullName()))
                     .append('firstBatch', new BsonArrayWrapper([]))), null)
@@ -681,17 +650,24 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
     // sanity check that the server accepts tailable and await data flags
     def 'should pass tailable and await data flags through'() {
         given:
-        def (cursorType, maxAwaitTimeMS, maxTimeMSForCursor) = cursorDetails
+        def (cursorType, long maxAwaitTimeMS, long maxTimeMSForCursor) = cursorDetails
+        def timeoutSettings = ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT.withMaxAwaitTimeMS(maxAwaitTimeMS)
+        def timeoutContext = Spy(TimeoutContext, constructorArgs: [timeoutSettings])
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(timeoutContext)
+
         collectionHelper.create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000))
         def operation = new FindOperation(namespace, new BsonDocumentCodec())
                 .cursorType(cursorType)
-                .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS)

         when:
-        def cursor = execute(operation, async)
+        if (async) {
+            execute(operation, getAsyncBinding(operationContext))
+        } else {
+            execute(operation, getBinding(operationContext))
+        }

         then:
-        cursor.maxTimeMS == maxTimeMSForCursor
+        1 * timeoutContext.setMaxTimeOverride(maxTimeMSForCursor)

         where:
         [async, cursorDetails] << [
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
index 38c267dd3f7..07a3fadc5fd 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
@@ -17,12 +17,12 @@ package com.mongodb.internal.operation
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadPreference
 import com.mongodb.ServerAddress
 import com.mongodb.ServerCursor
+import com.mongodb.WriteConcern
 import com.mongodb.async.FutureResultCallback
 import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.connection.ConnectionDescription
@@ -45,14 +45,11 @@ import org.bson.codecs.Decoder
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf

-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.getBinding
-import static com.mongodb.ClusterFixture.isSharded
 import static com.mongodb.ClusterFixture.serverVersionAtLeast
 import static com.mongodb.ClusterFixture.serverVersionLessThan
-import static java.util.concurrent.TimeUnit.MILLISECONDS

 class ListCollectionsOperationSpecification extends OperationFunctionalSpecification {
@@ -260,7 +257,7 @@ class ListCollectionsOperationSpecifica
     def 'should filter indexes when calling hasNext before next'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
@@ -276,7 +273,7 @@ class ListCollectionsOperationSpecifica
     def 'should filter indexes without calling hasNext before next'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
@@ -298,7 +295,7 @@ class ListCollectionsOperationSpecifica
     def 'should filter indexes when calling hasNext before tryNext'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
@@ -320,7 +317,7 @@ class ListCollectionsOperationSpecifica
     def 'should filter indexes without calling hasNext before tryNext'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName,
WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) @@ -337,7 +334,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes asynchronously'() { given: - new DropDatabaseOperation(databaseName).execute(getBinding()) + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) @@ -413,55 +410,18 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica cursor?.close() } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).maxTime(1000, MILLISECONDS) - - enableMaxTimeFailPoint() - - when: - operation.execute(getBinding()) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } - - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).maxTime(1000, MILLISECONDS) - - enableMaxTimeFailPoint() - - when: - executeAsync(operation) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) def connectionSource = Stub(ConnectionSource) { - getServerApi() >> null - getReadPreference() >> readPreference getConnection() >> connection + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def readBinding = Stub(ReadBinding) { getReadConnectionSource() >> connectionSource getReadPreference() >> readPreference - getServerApi() >> null + getOperationContext() >> OPERATION_CONTEXT } def operation = new ListCollectionsOperation(helper.dbName, helper.decoder) @@ -470,7 +430,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica then: _ * connection.getDescription() >> helper.threeSixConnectionDescription - 1 * connection.command(_, _, _, readPreference, _, readBinding) >> helper.commandResult + 1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult 1 * connection.release() where: @@ -481,14 +441,14 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica given: def connection = Mock(AsyncConnection) def connectionSource = Stub(AsyncConnectionSource) { - getReadPreference() >> readPreference - getServerApi() >> null getConnection(_) >> { it[0].onResult(connection, null) } + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def readBinding = Stub(AsyncReadBinding) { - getReadPreference() >> readPreference - getServerApi() >> null getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def operation = new ListCollectionsOperation(helper.dbName, helper.decoder) @@ -497,7 +457,8 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica then: _ * connection.getDescription() >> 
helper.threeSixConnectionDescription - 1 * connection.commandAsync(helper.dbName, _, _, readPreference, *_) >> { it.last().onResult(helper.commandResult, null) } + 1 * connection.commandAsync(helper.dbName, _, _, readPreference, _, OPERATION_CONTEXT, *_) >> { + it.last().onResult(helper.commandResult, null) } where: readPreference << [ReadPreference.primary(), ReadPreference.secondary()] diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy index 95afad40957..740f9073dcd 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference import com.mongodb.connection.ConnectionDescription @@ -33,14 +32,8 @@ import org.bson.BsonRegularExpression import org.bson.Document import org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec -import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.executeAsync -import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.isSharded -import static java.util.concurrent.TimeUnit.MILLISECONDS +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT class ListDatabasesOperationSpecification extends OperationFunctionalSpecification { def codec = new DocumentCodec() @@ -75,55 +68,18 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati async << [true, false] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListDatabasesOperation(codec).maxTime(1000, MILLISECONDS) - - enableMaxTimeFailPoint() - - when: - operation.execute(getBinding()) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } - - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListDatabasesOperation(codec).maxTime(1000, MILLISECONDS) - - enableMaxTimeFailPoint() - - when: - executeAsync(operation) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) def connectionSource = Stub(ConnectionSource) { - getReadPreference() >> readPreference - getServerApi() >> null getConnection() >> connection + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def readBinding = Stub(ReadBinding) { getReadConnectionSource() >> connectionSource getReadPreference() >> readPreference - getServerApi() >> null + getOperationContext() >> OPERATION_CONTEXT } def operation = new ListDatabasesOperation(helper.decoder) @@ -132,7 +88,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati then: _ * connection.getDescription() >> 
helper.connectionDescription - 1 * connection.command(_, _, _, readPreference, _, readBinding) >> helper.commandResult + 1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult 1 * connection.release() where: @@ -148,7 +104,6 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati } def readBinding = Stub(AsyncReadBinding) { getReadPreference() >> readPreference - getServerApi() >> null getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } def operation = new ListDatabasesOperation(helper.decoder) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy index 51280de9b45..462bf367e50 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference @@ -42,14 +41,10 @@ import org.bson.BsonString import org.bson.Document import org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec -import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.isSharded -import static java.util.concurrent.TimeUnit.MILLISECONDS class ListIndexesOperationSpecification extends OperationFunctionalSpecification { @@ -116,8 +111,8 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1))) - new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)]) - .execute(getBinding()) + new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding()) when: BatchCursor cursor = operation.execute(getBinding()) @@ -136,8 +131,8 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1))) - new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)]) - .execute(getBinding()) + new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding()) when: def cursor = executeAsync(operation) @@ -212,56 +207,18 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification cursor?.close() } - @IgnoreIf({ isSharded() }) - def 'should throw 
execution timeout exception from execute'() { - given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS) - collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) - - enableMaxTimeFailPoint() - - when: - operation.execute(getBinding()) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } - - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS) - collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) - - enableMaxTimeFailPoint() - - when: - executeAsync(operation) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - } - - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) def connectionSource = Stub(ConnectionSource) { - getServerApi() >> null - getReadPreference() >> readPreference getConnection() >> connection + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def readBinding = Stub(ReadBinding) { - getServerApi() >> null getReadConnectionSource() >> connectionSource getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def operation = new ListIndexesOperation(helper.namespace, helper.decoder) @@ -270,7 +227,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification then: _ * connection.getDescription() >> helper.threeSixConnectionDescription - 1 * connection.command(_, _, _, readPreference, _, readBinding) >> helper.commandResult + 1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult 1 * connection.release() where: @@ -285,7 +242,6 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification getConnection(_) >> { it[0].onResult(connection, null) } } def readBinding = Stub(AsyncReadBinding) { - getServerApi() >> null getReadPreference() >> readPreference getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy index 5332eb34339..f7eb191773f 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy @@ -29,7 +29,6 @@ import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDouble import org.bson.BsonInt32 -import org.bson.BsonInt64 import org.bson.BsonJavaScript import org.bson.BsonString import org.bson.Document @@ -42,15 +41,14 @@ import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.client.model.Filters.gte -import static java.util.concurrent.TimeUnit.MILLISECONDS class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpecification { def mapReduceInputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceInput') def mapReduceOutputNamespace = new MongoNamespace(getDatabaseName(), 
'mapReduceOutput') def mapReduceOperation = new MapReduceToCollectionOperation(mapReduceInputNamespace, - new BsonJavaScript('function(){ emit( this.name , 1 ); }'), - new BsonJavaScript('function(key, values){ return values.length; }'), - mapReduceOutputNamespace.getCollectionName()) + new BsonJavaScript('function(){ emit( this.name , 1 ); }'), + new BsonJavaScript('function(key, values){ return values.length; }'), + mapReduceOutputNamespace.getCollectionName(), null) def expectedResults = [new BsonDocument('_id', new BsonString('Pete')).append('value', new BsonDouble(2.0)), new BsonDocument('_id', new BsonString('Sam')).append('value', new BsonDouble(1.0))] as Set def helper = new CollectionHelper(new BsonDocumentCodec(), mapReduceOutputNamespace) @@ -64,8 +62,9 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe } def cleanup() { - new DropCollectionOperation(mapReduceInputNamespace).execute(getBinding()) - new DropCollectionOperation(mapReduceOutputNamespace).execute(getBinding()) + new DropCollectionOperation(mapReduceInputNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropCollectionOperation(mapReduceOutputNamespace, WriteConcern.ACKNOWLEDGED) + .execute(getBinding()) } def 'should have the correct defaults'() { @@ -75,7 +74,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def out = 'outCollection' when: - def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out) + def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, null) then: operation.getMapFunction() == mapF @@ -89,7 +88,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe operation.getLimit() == 0 operation.getScope() == null operation.getSort() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null !operation.isJsMode() @@ -118,7 +116,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe .limit(10) .scope(scope) .sort(sort) - .maxTime(1, MILLISECONDS) .bypassDocumentValidation(true) .collation(defaultCollation) @@ -133,7 +130,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe operation.getLimit() == 10 operation.getScope() == scope operation.getSort() == sort - operation.getMaxTime(MILLISECONDS) == 1 operation.getBypassDocumentValidation() == true operation.getCollation() == defaultCollation } @@ -183,7 +179,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, new BsonJavaScript('function(){ emit( "level" , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), - 'collectionOut') + 'collectionOut', null) execute(operation, async) then: @@ -246,7 +242,8 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def dbName = 'dbName' when: - def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY) + def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, + WriteConcern.MAJORITY) def expectedCommand = new BsonDocument('mapReduce', new BsonString(getCollectionName())) .append('map', mapF) .append('reduce', reduceF) @@ -261,14 +258,15 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe ReadPreference.primary(), false) when: - 
operation.action(action) + operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, + WriteConcern.MAJORITY) + .action(action) .databaseName(dbName) .finalizeFunction(finalizeF) .filter(filter) .limit(10) .scope(scope) .sort(sort) - .maxTime(10, MILLISECONDS) .bypassDocumentValidation(true) .verbose(true) @@ -279,7 +277,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe .append('scope', scope) .append('verbose', BsonBoolean.TRUE) .append('limit', new BsonInt32(10)) - .append('maxTimeMS', new BsonInt64(10)) if (includeCollation) { operation.collation(defaultCollation) @@ -310,7 +307,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, new BsonJavaScript('function(){ emit( this._id, this.str ); }'), new BsonJavaScript('function(key, values){ return values; }'), - 'collectionOut') + 'collectionOut', null) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy index 28986a76e33..3289f10f578 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy @@ -37,7 +37,6 @@ import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDouble import org.bson.BsonInt32 -import org.bson.BsonInt64 import org.bson.BsonJavaScript import org.bson.BsonString import org.bson.BsonTimestamp @@ -46,17 +45,16 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS class MapReduceWithInlineResultsOperationSpecification extends OperationFunctionalSpecification { private final bsonDocumentCodec = new BsonDocumentCodec() - def mapReduceOperation = new MapReduceWithInlineResultsOperation( - getNamespace(), + def mapReduceOperation = new MapReduceWithInlineResultsOperation(getNamespace(), new BsonJavaScript('function(){ emit( this.name , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), bsonDocumentCodec) @@ -76,7 +74,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction when: def mapF = new BsonJavaScript('function(){ }') def reduceF = new BsonJavaScript('function(key, values){ }') - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, + bsonDocumentCodec) then: operation.getMapFunction() == mapF @@ -85,7 +84,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction operation.getFinalizeFunction() == null 
operation.getScope() == null operation.getSort() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getLimit() == 0 operation.getCollation() == null !operation.isJsMode() @@ -100,7 +98,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def finalizeF = new BsonJavaScript('function(key, value){}') def mapF = new BsonJavaScript('function(){ }') def reduceF = new BsonJavaScript('function(key, values){ }') - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, + mapF, reduceF, bsonDocumentCodec) .filter(filter) .finalizeFunction(finalizeF) .scope(scope) @@ -108,7 +107,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction .jsMode(true) .verbose(true) .limit(20) - .maxTime(10, MILLISECONDS) .collation(defaultCollation) then: @@ -118,7 +116,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction operation.getFinalizeFunction() == finalizeF operation.getScope() == scope operation.getSort() == sort - operation.getMaxTime(MILLISECONDS) == 10 operation.getLimit() == 20 operation.getCollation() == defaultCollation operation.isJsMode() @@ -141,8 +138,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) then: testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult) @@ -153,8 +150,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should create the expected command'() { when: - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) def expectedCommand = new BsonDocument('mapReduce', new BsonString(helper.namespace.getCollectionName())) .append('map', operation.getMapFunction()) .append('reduce', operation.getReduceFunction()) @@ -171,7 +168,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction .jsMode(true) .verbose(true) .limit(20) - .maxTime(10, MILLISECONDS) expectedCommand.append('query', operation.getFilter()) @@ -180,7 +176,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction .append('finalize', operation.getFinalizeFunction()) .append('jsMode', BsonBoolean.TRUE) .append('verbose', BsonBoolean.TRUE) - .append('maxTimeMS', new BsonInt64(10)) .append('limit', new BsonInt32(20)) if (includeCollation) { @@ -204,8 +199,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction given: def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new MapReduceWithInlineResultsOperation( - namespace, + def operation = new MapReduceWithInlineResultsOperation(namespace, new 
BsonJavaScript('function(){ emit( this.str, 1 ); }'), new BsonJavaScript('function(key, values){ return Array.sum(values); }'), bsonDocumentCodec) @@ -224,16 +218,16 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should add read concern to command'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(ReadBinding) def source = Stub(ConnectionSource) def connection = Mock(Connection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.readConnectionSource >> source - binding.sessionContext >> sessionContext source.connection >> connection source.retain() >> source - source.getServerApi() >> null + source.operationContext >> operationContext def commandDocument = BsonDocument.parse(''' { "mapReduce" : "coll", "map" : { "$code" : "function(){ }" }, @@ -242,8 +236,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction }''') appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) when: operation.execute(binding) @@ -251,7 +245,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction then: _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.command(_, commandDocument, _, _, _, binding) >> + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> new BsonDocument('results', new BsonArrayWrapper([])) .append('counts', new BsonDocument('input', new BsonInt32(0)) @@ -273,14 +267,14 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should add read concern to command asynchronously'() { given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) def binding = Stub(AsyncReadBinding) def source = Stub(AsyncConnectionSource) def connection = Mock(AsyncConnection) binding.readPreference >> ReadPreference.primary() - binding.serverApi >> null + binding.operationContext >> operationContext binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } - binding.sessionContext >> sessionContext - source.serverApi >> null + source.operationContext >> operationContext source.getConnection(_) >> { it[0].onResult(connection, null) } source.retain() >> source def commandDocument = BsonDocument.parse(''' @@ -291,8 +285,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction }''') appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) when: executeAsync(operation, binding) @@ -300,7 +294,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction then: 
_ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 6, STANDALONE, 1000, 100000, 100000, []) - 1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> { + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> { it.last().onResult(new BsonDocument('results', new BsonArrayWrapper([])) .append('counts', new BsonDocument('input', new BsonInt32(0)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy index 7e7938acfe2..9363f6a1812 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy @@ -100,8 +100,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when no document with the same id exists, should insert the document'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], - ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -120,7 +120,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def document = new BsonDocument('_id', new BsonInt32(1)) getCollectionHelper().insertDocuments(document) - def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered, + ACKNOWLEDGED, false) when: execute(operation, async) @@ -135,8 +136,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'RawBsonDocument should not generate an _id'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], - ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), + [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -399,11 +400,11 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def id = new ObjectId() def operation = new MixedBulkWriteOperation(getNamespace(), - [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), - new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), - REPLACE) - .upsert(true)], - true, ACKNOWLEDGED, false) + [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), + REPLACE) + .upsert(true)], + true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -546,15 +547,15 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1), new Document('_id', 2)) def operation = new MixedBulkWriteOperation(getNamespace(), - [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), - new BsonDocument('_id', new BsonInt32(1)) - .append('x', new 
BsonBinary(new byte[1024 * 1024 * 16 - 30])), - REPLACE), - new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), - new BsonDocument('_id', new BsonInt32(2)) - .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), - REPLACE)], - true, ACKNOWLEDGED, false) + [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(1)) + .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), + REPLACE), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), + new BsonDocument('_id', new BsonInt32(2)) + .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), + REPLACE)], + true, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -636,13 +637,14 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should handle multi-length runs of UNACKNOWLEDGED insert, update, replace, and remove'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED, + false) def binding = async ? getAsyncSingleConnectionBinding() : getSingleConnectionBinding() when: def result = execute(operation, binding) - execute(new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, - false,), binding) + execute(new MixedBulkWriteOperation(namespace, + [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, false,), binding) then: !result.wasAcknowledged() @@ -710,12 +712,12 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'error details should have correct index on ordered write failure'() { given: def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), - new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), - new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), - UPDATE), - new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2 - ], true, ACKNOWLEDGED, false) + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), + UPDATE), + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2 + ], true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -733,12 +735,12 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(getTestInserts()) def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), - new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), - new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), - UPDATE), - new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2 - ], false, ACKNOWLEDGED, false) + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), + UPDATE), + new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2 + ], false, ACKNOWLEDGED, false) when: 
execute(operation, async) @@ -804,8 +806,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw bulk write exception with a write concern error when wtimeout is exceeded'() { given: def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], - false, new WriteConcern(5, 1), false) + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], + false, new WriteConcern(5, 1), false) when: execute(operation, async) @@ -823,9 +825,9 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(getTestInserts()) def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))), - new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // duplicate key - ], false, new WriteConcern(4, 1), false) + [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // duplicate key + ], false, new WriteConcern(4, 1), false) when: execute(operation, async) // This is assuming that it won't be able to replicate to 4 servers in 1 ms @@ -926,8 +928,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def collectionHelper = getCollectionHelper(namespace) collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions().validationOptions( new ValidationOptions().validator(gte('level', 10)))) - def operation = new MixedBulkWriteOperation(namespace, [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, - ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, + [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, ACKNOWLEDGED, false) when: execute(operation, async) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy index 56c0029786c..043c6de48a3 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy @@ -35,61 +35,43 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan class RenameCollectionOperationSpecification extends OperationFunctionalSpecification { def cleanup() { - new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection')).execute(getBinding()) + new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection'), + WriteConcern.ACKNOWLEDGED).execute(getBinding()) } def 'should return rename a collection'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(getNamespace(), + new MongoNamespace(getDatabaseName(), 'newCollection'), null) when: - new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection')).execute(getBinding()) + execute(operation, async) then: !collectionNameExists(getCollectionName()) collectionNameExists('newCollection') - } - - - def 'should return rename a collection asynchronously'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), 
new Document('documentThat', 'forces creation of the Collection')) - assert collectionNameExists(getCollectionName()) - - when: - executeAsync(new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'))) - then: - !collectionNameExists(getCollectionName()) - collectionNameExists('newCollection') + where: + async << [true, false] } def 'should throw if not drop and collection exists'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(getNamespace(), getNamespace(), null) when: - new RenameCollectionOperation(getNamespace(), getNamespace()).execute(getBinding()) + execute(operation, async) then: thrown(MongoServerException) collectionNameExists(getCollectionName()) - } - - def 'should throw if not drop and collection exists asynchronously'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) - assert collectionNameExists(getCollectionName()) - - when: - executeAsync(new RenameCollectionOperation(getNamespace(), getNamespace())) - - then: - thrown(MongoServerException) - collectionNameExists(getCollectionName()) + where: + async << [true, false] } @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) @@ -97,8 +79,8 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) - def operation = new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'), - new WriteConcern(5)) + def operation = new RenameCollectionOperation(getNamespace(), + new MongoNamespace(getDatabaseName(), 'newCollection'), new WriteConcern(5)) when: async ? 
executeAsync(operation) : operation.execute(getBinding()) @@ -112,7 +94,6 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific async << [true, false] } - def collectionNameExists(String collectionName) { def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) if (!cursor.hasNext()) { diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java index 731f83c3c53..0eeeff8bdb4 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java @@ -23,19 +23,15 @@ import com.mongodb.ReadPreference; import com.mongodb.ServerCursor; import com.mongodb.async.FutureResultCallback; -import com.mongodb.internal.IgnorableRequestContext; -import com.mongodb.internal.binding.StaticBindingContext; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.connection.NoOpSessionContext; -import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.validator.NoOpFieldNameValidator; import org.bson.BsonDocument; import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.codecs.BsonDocumentCodec; -import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; final class TestOperationHelper { @@ -60,10 +56,7 @@ static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final Serv connection.command(namespace.getDatabaseName(), new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) .append("collection", new BsonString(namespace.getCollectionName())), - new NoOpFieldNameValidator(), ReadPreference.primary(), - new BsonDocumentCodec(), - new StaticBindingContext(new NoOpSessionContext(), getServerApi(), IgnorableRequestContext.INSTANCE, - new OperationContext()))); + new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT)); } static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor, @@ -73,9 +66,7 @@ static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final Serv connection.commandAsync(namespace.getDatabaseName(), new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) .append("collection", new BsonString(namespace.getCollectionName())), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), - new StaticBindingContext(new NoOpSessionContext(), getServerApi(), IgnorableRequestContext.INSTANCE, - new OperationContext()), callback); + new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT, callback); callback.get(); }); } diff --git a/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java b/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java new file mode 100644 index 00000000000..226b035151c --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.test; + +import com.mongodb.test.extension.FlakyTestExtension; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.parallel.Execution; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import static org.junit.jupiter.api.parallel.ExecutionMode.SAME_THREAD; + +/** + * {@code @FlakyTest} is used to signal that the annotated method contains a flaky / racy test. + * + *
<p>
      The test will be repeated up to a {@linkplain #maxAttempts maximum number of times} with a + * configurable {@linkplain #name display name}. Each invocation will be repeated if the previous test fails. + */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Execution(SAME_THREAD) // cannot be run in parallel +@ExtendWith(FlakyTestExtension.class) +@TestTemplate +public @interface FlakyTest { + + /** + * Placeholder for the {@linkplain TestInfo#getDisplayName display name} of + * a {@code @RepeatedTest} method: {displayName} + */ + String DISPLAY_NAME_PLACEHOLDER = "{displayName}"; + + /** + * Placeholder for the current repetition count of a {@code @FlakyTest} + * method: {index} + */ + String CURRENT_REPETITION_PLACEHOLDER = "{index}"; + + /** + * Placeholder for the total number of repetitions of a {@code @FlakyTest} + * method: {totalRepetitions} + */ + String TOTAL_REPETITIONS_PLACEHOLDER = "{totalRepetitions}"; + + /** + * Short display name pattern for a repeated test: {@value #SHORT_DISPLAY_NAME} + * + * @see #CURRENT_REPETITION_PLACEHOLDER + * @see #TOTAL_REPETITIONS_PLACEHOLDER + * @see #LONG_DISPLAY_NAME + */ + String SHORT_DISPLAY_NAME = "Attempt: " + CURRENT_REPETITION_PLACEHOLDER + " / " + TOTAL_REPETITIONS_PLACEHOLDER; + + /** + * Long display name pattern for a repeated test: {@value #LONG_DISPLAY_NAME} + * + * @see #DISPLAY_NAME_PLACEHOLDER + * @see #SHORT_DISPLAY_NAME + */ + String LONG_DISPLAY_NAME = DISPLAY_NAME_PLACEHOLDER + " " + SHORT_DISPLAY_NAME; + + /** + * max number of attempts + * + * @return N-times repeat test if it failed + */ + int maxAttempts() default 1; + + /** + * Display name for test method + * + * @return Short name + */ + String name() default LONG_DISPLAY_NAME; +} diff --git a/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java b/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java new file mode 100644 index 00000000000..55ddd7a001e --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java @@ -0,0 +1,198 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.test.extension; + +import com.mongodb.test.FlakyTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.AfterTestExecutionCallback; +import org.junit.jupiter.api.extension.BeforeTestExecutionCallback; +import org.junit.jupiter.api.extension.ConditionEvaluationResult; +import org.junit.jupiter.api.extension.ExecutionCondition; +import org.junit.jupiter.api.extension.Extension; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.TestExecutionExceptionHandler; +import org.junit.jupiter.api.extension.TestInstantiationException; +import org.junit.jupiter.api.extension.TestTemplateInvocationContext; +import org.junit.jupiter.api.extension.TestTemplateInvocationContextProvider; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.platform.commons.util.Preconditions; +import org.opentest4j.TestAbortedException; + +import java.lang.reflect.Method; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Spliterator; +import java.util.stream.Stream; + +import static com.mongodb.test.FlakyTest.CURRENT_REPETITION_PLACEHOLDER; +import static com.mongodb.test.FlakyTest.DISPLAY_NAME_PLACEHOLDER; +import static com.mongodb.test.FlakyTest.TOTAL_REPETITIONS_PLACEHOLDER; +import static java.util.Collections.singletonList; +import static java.util.Spliterators.spliteratorUnknownSize; +import static java.util.stream.StreamSupport.stream; +import static org.junit.platform.commons.util.AnnotationUtils.findAnnotation; +import static org.junit.platform.commons.util.AnnotationUtils.isAnnotated; + + +/** + * A {@code TestTemplateInvocationContextProvider} that supports the {@link FlakyTest @FlakyTest} annotation. 
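+ *
+ * <p>Illustrative usage sketch (the test method name below is hypothetical and is only meant to show
+ * how the annotation this extension supports might be applied; it assumes JUnit 5 on the test classpath):
+ * <pre>
+ * &#64;FlakyTest(maxAttempts = 3)
+ * void raceProneOperation() {
+ *     // hypothetical test body that occasionally fails for timing reasons; failures on attempts
+ *     // before the last are reported as aborted, and only a failure on the final attempt propagates
+ * }
+ * </pre>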
+ */ +public class FlakyTestExtension implements TestTemplateInvocationContextProvider, + BeforeTestExecutionCallback, + AfterTestExecutionCallback, + TestExecutionExceptionHandler { + + private int maxAttempts = 0; + private FlakyTestDisplayFormatter formatter; + private Boolean testHasPassed; + private int currentAttempt = 0; + + + @Override + public void afterTestExecution(final ExtensionContext extensionContext) { + testHasPassed = extensionContext.getExecutionException().map(e -> e instanceof TestInstantiationException).orElse(true); + } + + @Override + public boolean supportsTestTemplate(final ExtensionContext context) { + return isAnnotated(context.getTestMethod(), FlakyTest.class); + } + + @Override + public Stream provideTestTemplateInvocationContexts(final ExtensionContext context) { + Method testMethod = context.getRequiredTestMethod(); + String displayName = context.getDisplayName(); + + if (isAnnotated(testMethod, Test.class)) { + throw new TestInstantiationException(String.format("Test %s also annotated with @Test", displayName)); + } else if (isAnnotated(testMethod, ParameterizedTest.class)) { + throw new TestInstantiationException(String.format("Test %s also annotated with @ParameterizedTest", displayName)); + } + + FlakyTest flakyTest = findAnnotation(testMethod, FlakyTest.class) + .orElseThrow(() -> + new TestInstantiationException("The extension should not be executed unless the test method is " + + "annotated with @FlakyTest.")); + + formatter = displayNameFormatter(flakyTest, testMethod, displayName); + + maxAttempts = flakyTest.maxAttempts(); + Preconditions.condition(maxAttempts > 0, "Total repeats must be higher than 0"); + + //Convert logic of repeated handler to spliterator + Spliterator spliterator = + spliteratorUnknownSize(new TestTemplateIterator(), Spliterator.NONNULL); + return stream(spliterator, false); + } + + private FlakyTestDisplayFormatter displayNameFormatter(final FlakyTest flakyTest, final Method method, + final String displayName) { + String pattern = Preconditions.notBlank(flakyTest.name().trim(), () -> String.format( + "Configuration error: @FlakyTest on method [%s] must be declared with a non-empty name.", method)); + return new FlakyTestDisplayFormatter(pattern, displayName); + } + + @Override + public void handleTestExecutionException(final ExtensionContext context, final Throwable throwable) throws Throwable { + if (currentAttempt < maxAttempts) { + // Mark failure as skipped / aborted so to pass CI + throw new TestAbortedException("Test failed on attempt: " + currentAttempt); + } + throw throwable; + } + + @Override + public void beforeTestExecution(final ExtensionContext context) { + currentAttempt++; + } + + /** + * TestTemplateIterator (Repeat test if it failed) + */ + class TestTemplateIterator implements Iterator { + private int currentIndex = 0; + + @Override + public boolean hasNext() { + if (currentIndex == 0) { + return true; + } + return !testHasPassed && currentIndex < maxAttempts; + } + + @Override + public TestTemplateInvocationContext next() { + if (hasNext()) { + currentIndex++; + return new RepeatInvocationContext(currentIndex, maxAttempts, formatter); + } + throw new NoSuchElementException(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + static class RepeatInvocationContext implements TestTemplateInvocationContext { + private final int currentRepetition; + private final int totalTestRuns; + private final FlakyTestDisplayFormatter formatter; + + 
RepeatInvocationContext(final int currentRepetition, final int totalRepetitions, final FlakyTestDisplayFormatter formatter) { + this.currentRepetition = currentRepetition; + this.totalTestRuns = totalRepetitions; + this.formatter = formatter; + } + + @Override + public String getDisplayName(final int invocationIndex) { + return formatter.format(currentRepetition, totalTestRuns); + } + + @Override + public List getAdditionalExtensions() { + return singletonList((ExecutionCondition) context -> { + if (currentRepetition > totalTestRuns) { + return ConditionEvaluationResult.disabled("All attempts failed"); + } else { + return ConditionEvaluationResult.enabled("Test failed - retry"); + } + }); + } + } + + static class FlakyTestDisplayFormatter { + private final String pattern; + private final String displayName; + + FlakyTestDisplayFormatter(final String pattern, final String displayName) { + this.pattern = pattern; + this.displayName = displayName; + } + + String format(final int currentRepetition, final int totalRepetitions) { + return pattern + .replace(DISPLAY_NAME_PLACEHOLDER, displayName) + .replace(CURRENT_REPETITION_PLACEHOLDER, String.valueOf(currentRepetition)) + .replace(TOTAL_REPETITIONS_PLACEHOLDER, String.valueOf(totalRepetitions)); + } + + } + +} diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json b/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json index 443aa0aa232..247541646cc 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json @@ -4,6 +4,7 @@ "minServerVersion": "4.4" } ], + "comment": "Updated timeoutMS and blockTimeMS manually to address race conditions in tests with SSL handshake.", "database_name": "cse-timeouts-db", "collection_name": "cse-timeouts-coll", "data": [], @@ -110,7 +111,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 250 } }, "clientOptions": { @@ -119,7 +120,7 @@ "aws": {} } }, - "timeoutMS": 50 + "timeoutMS": 200 }, "operations": [ { @@ -161,7 +162,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 3 + "times": 2 }, "data": { "failCommands": [ @@ -169,7 +170,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 300 } }, "clientOptions": { @@ -178,7 +179,7 @@ "aws": {} } }, - "timeoutMS": 50 + "timeoutMS": 500 }, "operations": [ { diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.md b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.md new file mode 100644 index 00000000000..b4160500f54 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.md @@ -0,0 +1,618 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. 
As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). Any fail points set during a +test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests +MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to +listen for `command_started` events. + +### 1. Multi-batch writes + +This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an +exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow +for differing bulk encoding performance. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call. + + - Expect this to fail with a timeout error. + +5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. + +### 2. maxTimeMS is not set for commands sent to mongocryptd + +This test MUST only be run against enterprise server versions 4.2 and higher. + +1. Launch a mongocryptd process on 23000. +2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. +3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. +4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + +### 3. ClientEncryption + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY` +refers to the following base64: + +```javascript +Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk +``` + +For each test, perform the following setup: + +1. Using `internalClient`, drop and create the `keyvault.datakeys` collection. + +2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`. + +3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this + object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map: + + ```javascript + { + "local": { "key": } + } + ``` + +#### createDataKey + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to fail with a timeout error. + +3. 
Verify that an `insert` command was executed against to `keyvault.datakeys` as part of the `createDataKey` call. + +#### encrypt + +1. Call `client_encryption.createDataKey()` with the `local` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. 
Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. 
+ + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. 
Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. `command_started` and `command_failed` events for an `abortTransaction` command. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. +- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.rst b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.rst new file mode 100644 index 00000000000..8a6bba61dac --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.rst @@ -0,0 +1,616 @@ +====================================== +Client Side Operations Timeouts Tests +====================================== + +.. contents:: + +---- + +Introduction +============ + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +Spec Tests +========== + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some +of them may intermittently fail without any bugs being present in the driver. 
As a mitigation, drivers MAY execute +these tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and +another with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the ``single-node-auth.json`` +and ``single-node-auth-ssl.json`` files in the ``drivers-evergreen-tools`` repository to create these clusters. + +Prose Tests +=========== + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the ``timeoutMS`` option set (referred to as ``internalClient``). Any fail points set +during a test MUST be unset using ``internalClient`` after the test has been executed. All MongoClient instances +created for tests MUST be configured with read/write concern ``majority``, read preference ``primary``, and command +monitoring enabled to listen for ``command_started`` events. + +1. Multi-batch writes +~~~~~~~~~~~~~~~~~~~~~ + +This test MUST only run against standalones on server versions 4.4 and higher. +The ``insertMany`` call takes an exceedingly long time on replicasets and sharded +clusters. Drivers MAY adjust the timeouts used in this test to allow for differing +bulk encoding performance. + +#. Using ``internalClient``, drop the ``db.coll`` collection. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=2000``. +#. Using ``client``, insert 50 1-megabyte documents in a single ``insertMany`` call. + + - Expect this to fail with a timeout error. + +#. Verify that two ``insert`` commands were executed against ``db.coll`` as part of the ``insertMany`` call. + +2. maxTimeMS is not set for commands sent to mongocryptd +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This test MUST only be run against enterprise server versions 4.2 and higher. + +#. Launch a mongocryptd process on 23000. +#. Create a MongoClient (referred to as ``client``) using the URI ``mongodb://localhost:23000/?timeoutMS=1000``. +#. Using ``client``, execute the ``{ ping: 1 }`` command against the ``admin`` database. +#. Verify via command monitoring that the ``ping`` command sent did not contain a ``maxTimeMS`` field. + +3. ClientEncryption +~~~~~~~~~~~~~~~~~~~ + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, +``LOCAL_MASTERKEY`` refers to the following base64: + +.. code:: javascript + + Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + +For each test, perform the following setup: + +#. Using ``internalClient``, drop and create the ``keyvault.datakeys`` collection. +#. Create a MongoClient (referred to as ``keyVaultClient``) with ``timeoutMS=10``. +#. Create a ``ClientEncryption`` object that wraps ``keyVaultClient`` (referred to as ``clientEncryption``). Configure this object with ``keyVaultNamespace`` set to ``keyvault.datakeys`` and the following KMS providers map: + + .. code:: javascript + + { + "local": { "key": } + } + +createDataKey +````````````` + +#. Using ``internalClient``, set the following fail point: + + .. 
code:: javascript + + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. + + - Expect this to fail with a timeout error. + +#. Verify that an ``insert`` command was executed against the ``keyvault.datakeys`` collection as part of the ``createDataKey`` call. + +encrypt +``````` + +#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as ``datakeyId``. + +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``datakeyId``. + + - Expect this to fail with a timeout error. + +#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``encrypt`` call. + +decrypt +``````` + +#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as ``dataKeyId``. + +#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``dataKeyId``. + + - Expect this to return a BSON binary with subtype 6, referred to as ``encrypted``. + +#. Close and re-create the ``keyVaultClient`` and ``clientEncryption`` objects. + +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Call ``clientEncryption.decrypt()`` with the value ``encrypted``. + + - Expect this to fail with a timeout error. + +#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``decrypt`` call. + +4. Background Connection Pooling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication +fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait +for some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events +are not published within that time. + +timeoutMS used for handshake commands +````````````````````````````````````` + +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + +#. Create a MongoClient (referred to as ``client``) configured with the following: + + - ``minPoolSize`` of 1 + - ``timeoutMS`` of 10 + - ``appName`` of ``timeoutBackgroundPoolTest`` + - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionClosedEvent`` events. + +#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionClosedEvent`` to be published. + +timeoutMS is refreshed for each handshake command +````````````````````````````````````````````````` + +#. 
Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + +#. Create a MongoClient (referred to as ``client``) configured with the following: + + - ``minPoolSize`` of 1 + - ``timeoutMS`` of 20 + - ``appName`` of ``refreshTimeoutBackgroundPoolTest`` + - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionReady`` events. + +#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionReady`` to be published. + +5. Blocking Iteration Methods +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes ``getMore`` commands in a loop until a document is available or an +error occurs. + +Tailable cursors +```````````````` + +#. Using ``internalClient``, drop the ``db.coll`` collection. +#. Using ``internalClient``, insert the document ``{ x: 1 }`` into ``db.coll``. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. +#. Using ``client``, create a tailable cursor on ``db.coll`` with ``cursorType=tailable``. + + - Expect this to succeed and return a cursor with a non-zero ID. + +#. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document ``{ x: 1 }`` without sending a ``getMore`` command. + +#. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +#. Verify that a ``find`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test. + +Change Streams +`````````````` + +#. Using ``internalClient``, drop the ``db.coll`` collection. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. +#. Using ``client``, use the ``watch`` helper to create a change stream against ``db.coll``. + + - Expect this to succeed and return a change stream with a non-zero ID. + +#. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +#. Verify that an ``aggregate`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test. + +6. GridFS - Upload +~~~~~~~~~~~~~~~~~~ + +Tests in this section MUST only be run against server versions 4.4 and higher. + +uploads via openUploadStream can be timed out +````````````````````````````````````````````` + +#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. 
Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. +#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database. +#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``). + + - Expect this to succeed and return a non-null stream. + +#. Using ``uploadStream``, upload a single ``0x12`` byte. +#. Call ``uploadStream.close()`` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +Aborting an upload stream can be timed out +`````````````````````````````````````````` + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. +#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database with ``chunkSizeBytes=2``. +#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``). + + - Expect this to succeed and return a non-null stream. + +#. Using ``uploadStream``, upload the bytes ``[0x01, 0x02, 0x03, 0x04]``. +#. Call ``uploadStream.abort()``. + + - Expect this to fail with a timeout error. + +7. GridFS - Download +~~~~~~~~~~~~~~~~~~~~ + +This test MUST only be run against server versions 4.4 and higher. + +#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. +#. Using ``internalClient``, insert the following document into the ``db.fs.files`` collection: + + .. code:: javascript + + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. +#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database. +#. Call ``bucket.open_download_stream`` with the id ``{ "$oid": "000000000000000000000005" }`` to create a download stream (referred to as ``downloadStream``). + + - Expect this to succeed and return a non-null stream. + +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Read from the ``downloadStream``. + + - Expect this to fail with a timeout error. + +#. Verify that two ``find`` commands were executed during the read: one against ``db.fs.files`` and another against ``db.fs.chunks``. + +8. Server Selection +~~~~~~~~~~~~~~~~~~~ + +serverSelectionTimeoutMS honored if timeoutMS is not set +```````````````````````````````````````````````````````` + +#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?serverSelectionTimeoutMS=10``. + +#. Using ``client``, execute the command ``{ ping: 1 }`` against the ``admin`` database. 
+ + - Expect this to fail with a server selection timeout error after no more than 15ms. + +timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS +`````````````````````````````````````````````````````````````````````````````````` + +#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20``. + +#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. + + - Expect this to fail with a server selection timeout error after no more than 15ms. + +serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS +`````````````````````````````````````````````````````````````````````````````````` + +#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10``. + +#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. + + - Expect this to fail with a server selection timeout error after no more than 15ms. + +serverSelectionTimeoutMS honored for server selection if timeoutMS=0 +```````````````````````````````````````````````````````````````````` + +#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10``. + +#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. + + - Expect this to fail with a server selection timeout error after no more than 15ms. + +timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS +``````````````````````````````````````````````````````````````````````````````````````````````` + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a +username and password). + +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10`` and ``serverSelectionTimeoutMS=20``. +#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``. + + - Expect this to fail with a timeout error after no more than 15ms. + +serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS +``````````````````````````````````````````````````````````````````````````````````````````````` + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a +username and password). + +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20`` and ``serverSelectionTimeoutMS=10``. +#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``. + + - Expect this to fail with a timeout error after no more than 15ms. + +9. endSession +~~~~~~~~~~~~~ + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. 
It MUST be +run three times: once with the timeout specified via the MongoClient ``timeoutMS`` option, once with the timeout +specified via the ClientSession ``defaultTimeoutMS`` option, and once more with the timeout specified via the +``timeoutMS`` option for the ``endSession`` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +#. Using ``internalClient``, drop the ``db.coll`` collection. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) and an explicit ClientSession derived from that MongoClient (referred to as ``session``). +#. Execute the following code: + + .. code:: typescript + + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + +#. Using ``session``, execute ``session.end_session`` + + - Expect this to fail with a timeout error after no more than 15ms. + +10. Convenient Transactions +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +timeoutMS is refreshed for abortTransaction if the callback fails +````````````````````````````````````````````````````````````````` + +#. Using ``internalClient``, drop the ``db.coll`` collection. +#. Using ``internalClient``, set the following fail point: + + .. code:: javascript + + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + +#. Create a new MongoClient (referred to as ``client``) configured with ``timeoutMS=10`` and an explicit ClientSession derived from that MongoClient (referred to as ``session``). +#. Using ``session``, execute a ``withTransaction`` operation with the following callback: + + .. code:: typescript + + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + +#. Expect the previous ``withTransaction`` call to fail with a timeout error. +#. Verify that the following events were published during the ``withTransaction`` call: + + #. ``command_started`` and ``command_failed`` events for an ``insert`` command. + #. ``command_started`` and ``command_failed`` events for an ``abortTransaction`` command. + +Unit Tests +========== + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore ``waitQueueTimeoutMS`` if ``timeoutMS`` is also set. +- If ``timeoutMS`` is set for an operation, the remaining ``timeoutMS`` value should apply to connection checkout after a server has been selected. +- If ``timeoutMS`` is not set for an operation, ``waitQueueTimeoutMS`` should apply to connection checkout after a server has been selected. +- If a new connection is required to execute an operation, ``min(remaining computedServerSelectionTimeout, connectTimeoutMS)`` should apply to socket establishment. +- For drivers that have control over OCSP behavior, ``min(remaining computedServerSelectionTimeout, 5 seconds)`` should apply to HTTP requests against OCSP responders. +- If ``timeoutMS`` is unset, operations fail after two non-consecutive socket timeouts. 
+- The remaining ``timeoutMS`` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining ``timeoutMS`` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing ``minPoolSize`` maintenance, ``connectTimeoutMS`` is used as the timeout for socket establishment. diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/bulkWrite.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/bulkWrite.json new file mode 100644 index 00000000000..9a05809f77c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/bulkWrite.json @@ -0,0 +1,160 @@ +{ + "description": "timeoutMS behaves correctly for bulkWrite operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "w": 1 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to entire bulkWrite, not individual commands", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert", + "update" + ], + "blockConnection": true, + "blockTimeMS": 120 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 1 + } + } + } + ], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/change-streams.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/change-streams.json new file mode 100644 index 00000000000..8cffb08e267 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/change-streams.json @@ -0,0 +1,598 @@ +{ + "description": "timeoutMS behaves correctly for change streams", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + 
"client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to initial aggregate", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 1050 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + 
"blockTimeMS": 150 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200, + "batchSize": 2, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to full resume attempt in a next call", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore", + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 120, + "errorCode": 7, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "change stream can be iterated again if previous iteration times out", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "maxAwaitTimeMS": 1, + "timeoutMS": 200 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": 
"aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json new file mode 100644 index 00000000000..a8b2d724fa9 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json @@ -0,0 +1,239 @@ +{ + "description": "timeoutMS behaves correctly when closing cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 200 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 200 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectError": { + "isTimeoutError": true + } + 
}, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "close", + "object": "cursor", + "arguments": { + "timeoutMS": 40 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/command-execution.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/command-execution.json new file mode 100644 index 00000000000..b9b306c7fb6 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/command-execution.json @@ -0,0 +1,393 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": 
"client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + }, + { + "description": "command is not sent if RTT is greater than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "rttTooHighTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 4 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 
100000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/convenient-transactions.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/convenient-transactions.json new file mode 100644 index 00000000000..3868b3026c2 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/convenient-transactions.json @@ -0,0 +1,209 @@ +{ + "description": "timeoutMS behaves correctly for the withTransaction API", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback", + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session", + "timeoutMS": 100 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "timeoutMS is not refreshed for each operation in the callback", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + 
"_id": 2 + }, + "session": "session" + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/cursors.json new file mode 100644 index 00000000000..36949d75091 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/cursors.json @@ -0,0 +1,113 @@ +{ + "description": "tests for timeoutMS behavior that applies to all cursor types", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client" + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "find errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "collection aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "database aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listCollections errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listIndexes errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/deprecated-options.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/deprecated-options.json new file mode 100644 index 00000000000..2ecba25f0d3 --- /dev/null +++ 
b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/deprecated-options.json @@ -0,0 +1,7180 @@ +{ + "description": "operations ignore deprecated timeout options if timeoutMS is set", + "comment": "Manually changed session to use defaultTimeoutMS when testing socket / maxCommit overrides", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 10000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores wTimeoutMS if timeoutMS is set", + "comment": "Moved timeoutMS from commitTransaction to startTransaction manually, as commitTransaction does not support a timeoutMS option.", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 1000, + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 10000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set", + "comment": "Moved timeoutMS from abortTransaction to startTransaction manually, as abortTransaction does not support a timeoutMS option.", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 
+ }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": 
{}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" 
+ ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": 
"collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": 
"bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + 
"database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": 
"client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": 
"estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ] + }, + { + 
"description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": 
"test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": 
"bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + 
"session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } 
+ ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + 
], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": 
{}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { 
+ "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/error-transformations.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/error-transformations.json new file mode 100644 index 00000000000..4889e39583a --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/error-transformations.json @@ -0,0 +1,180 @@ +{ + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + 
"timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "basic MaxTimeMSExpired error is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "write concern error MaxTimeMSExpired is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 50, + "errmsg": "maxTimeMS expired" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/global-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/global-timeoutMS.json new file mode 100644 index 00000000000..740bbad2e2c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/global-timeoutMS.json @@ -0,0 +1,5830 @@ +{ + "description": "timeoutMS can be configured on a MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 
0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + 
"maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, 
+ "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + 
"maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + 
"blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + 
}, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": 
"testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": 
{ + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": 
"client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + 
"databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + 
"insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { 
+ "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - replaceOne on collection", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { 
+ "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + 
"collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on 
collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": 
"findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + 
"ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + 
"databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-advanced.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-advanced.json new file mode 100644 index 00000000000..c6c0944d2f4 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-advanced.json @@ -0,0 +1,385 @@ +{ + "description": "timeoutMS behaves correctly for advanced GridFS API operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + 
"databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo", + "timeoutMS": 2000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to update during a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "arguments": { + "timeoutMS": 2000 + } + } + ] + }, + { + "description": "timeoutMS applied to files collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop", + "databaseName": "test", + "command": { + "drop": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to chunks collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to drop as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-delete.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-delete.json new file mode 100644 index 00000000000..9f4980114be --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-delete.json @@ -0,0 +1,285 @@ +{ + "description": "timeoutMS behaves correctly for GridFS delete operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for delete", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": 
"timeoutMS applied to delete against the files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to delete against the chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to entire delete, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-download.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-download.json new file mode 100644 index 00000000000..fb0b582706c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-download.json @@ -0,0 +1,360 @@ +{ + "description": "timeoutMS behaves correctly for GridFS download operations", + "comment": "Manually increased timeouts to reduce races", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": 
"1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for download", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to find to get files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find to get chunks", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to entire download, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": 
"000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-find.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-find.json new file mode 100644 index 00000000000..74090362844 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-find.json @@ -0,0 +1,183 @@ +{ + "description": "timeoutMS behaves correctly for GridFS find operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {}, + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find command", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } 
+ ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-upload.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-upload.json new file mode 100644 index 00000000000..b3f174973de --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-upload.json @@ -0,0 +1,409 @@ +{ + "description": "timeoutMS behaves correctly for GridFS upload operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for upload", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to initial find on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + 
"blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to chunk insertion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to creation of files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to upload as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/legacy-timeouts.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/legacy-timeouts.json new file mode 100644 index 00000000000..535425c934a --- 
/dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/legacy-timeouts.json @@ -0,0 +1,379 @@ +{ + "description": "legacy timeouts continue to work if timeoutMS is not set", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "socketTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "socketTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "waitQueueTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "wTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "wTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + }, + "writeConcern": { + "wtimeout": 50000 + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS option is used directly as the maxTimeMS field on a command", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + 
"client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "maxTimeMS": 50000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": 50000 + } + } + } + ] + } + ] + }, + { + "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 1000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": 1000 + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/non-tailable-cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/non-tailable-cursors.json new file mode 100644 index 00000000000..dd22ac3996f --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/non-tailable-cursors.json @@ -0,0 +1,542 @@ +{ + "description": "timeoutMS behaves correctly for non-tailable cursors", + "comment": "Manually reduced blockTimeMS for tests to pass in serverless", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "collectionName": "aggregateOutputColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to find if timeoutMode is cursor_lifetime", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is unset", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 101 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMS": 200, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 101 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "timeoutMS": 200, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find if timeoutMode is iteration", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 101 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "timeoutMS": 200, + "batchSize": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $out errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$out": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "aggregate with $merge errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$merge": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-collection-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-collection-timeoutMS.json new file mode 100644 index 00000000000..d17e22fc2f4 --- /dev/null +++ 
b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-collection-timeoutMS.json @@ -0,0 +1,3498 @@ +{ + "description": "timeoutMS can be overridden for a MongoCollection", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + 
{ + "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", 
+ "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": 
"collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": 
"deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a 
MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": 
"collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": 
"test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + 
"object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-database-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-database-timeoutMS.json new file mode 100644 index 00000000000..f7fa642c582 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-database-timeoutMS.json @@ -0,0 +1,4622 @@ +{ + "description": "timeoutMS can be overridden for a MongoDatabase", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": 
"database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + 
"arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + 
"command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - 
count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": 
"database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": 
"coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 
+ }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + 
"long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": 
"database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", 
+ "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": 
{ + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-operation-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-operation-timeoutMS.json new file mode 100644 index 00000000000..6fa0bd802a6 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-operation-timeoutMS.json @@ -0,0 +1,3577 @@ +{ + "description": "timeoutMS can be overridden for an operation", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 0, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - find on collection", + "operations": [ + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertOne on collection", + "operations": [ 
+ { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + 
"object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + 
"description": "timeoutMS can be set to 0 for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": 
"findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + 
"_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", 
+ "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-legacy-timeouts.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-legacy-timeouts.json new file mode 100644 index 00000000000..aded781aeed --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-legacy-timeouts.json @@ -0,0 +1,3042 @@ +{ + "description": "legacy timeouts behave correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "operation succeeds after one socket timeout - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], 
+ "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + 
"filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": 
"collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + 
{ + "description": "operation succeeds after one socket timeout - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + 
"pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + 
"expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + 
"isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } 
+ } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-timeoutMS.json new file mode 100644 index 00000000000..9daad260ef3 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-timeoutMS.json @@ -0,0 +1,5688 @@ +{ + "description": "timeoutMS behaves correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on 
collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } 
+ }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on 
collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + 
"errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, 
+ { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": 
"findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": 
"collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": 
"listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + 
"databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, 
+ { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + 
"minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + 
"closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": 
"coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false 
+ } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { 
+ "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, 
+ "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + 
"runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + 
"closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + 
], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/runCursorCommand.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/runCursorCommand.json new file mode 100644 index 00000000000..5fc0be33997 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/runCursorCommand.json @@ -0,0 +1,583 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "commandClient", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "commandDb", + "client": "commandClient", + "databaseName": "commandDb" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + 
"description": "errors if timeoutMode is set without timeoutMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if timeoutMode is cursorLifetime and cursorType is tailableAwait", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "timeoutMS": 100, + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "cappedCollection", + "batchSize": 1, + "tailable": true + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 1, + "cursorType": "tailable" + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "foo": "bar" + }, + { + "fizz": "buzz" + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true + }, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": 
"insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection" + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-inherit-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-inherit-timeoutMS.json new file mode 100644 index 00000000000..13ea91c7948 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-inherit-timeoutMS.json @@ -0,0 +1,331 @@ +{ + "description": "sessions inherit timeoutMS from their parent MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + 
}, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-operation-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-operation-timeoutMS.json new file mode 100644 index 00000000000..441c698328c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-operation-timeoutMS.json @@ -0,0 +1,335 @@ +{ + "description": "timeoutMS can be overridden for individual session operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for commitTransaction", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500, + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + 
"commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-timeoutMS.json new file mode 100644 index 00000000000..d90152e909c --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-timeoutMS.json @@ -0,0 +1,331 @@ +{ + "description": "timeoutMS can be overridden at the level of a ClientSession", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 500 + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + 
"name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-awaitData.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-awaitData.json new file mode 100644 index 00000000000..d0fe950dd8e --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-awaitData.json @@ -0,0 +1,424 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "comment": "Manually changed: timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set. 
Added ignoreExtra events, sometimes an extra getMore is called.", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 250, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + 
"find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 250, + "batchSize": 1, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-non-awaitData.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-non-awaitData.json new file mode 100644 index 00000000000..e88230e4f7a --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-non-awaitData.json @@ -0,0 +1,312 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + 
"minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailable" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "timeoutMS": 200, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } 
+ } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy new file mode 100644 index 00000000000..43deb3bd42c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +import javax.net.ssl.SSLContext +import java.util.concurrent.TimeUnit +import java.util.function.Supplier + +class ClientEncryptionSettingsSpecification extends Specification { + + def 'should have return the configured values defaults'() { + given: + def mongoClientSettings = MongoClientSettings.builder().build() + def keyVaultNamespace = "keyVaultNamespace" + def kmsProvider = ["provider": ["test" : "test"]] + def kmsProviderSupplier = ["provider": { ["test" : "test"] } as Supplier] + def kmsProviderSslContextMap = ["provider": SSLContext.getDefault()] + + when: + def options = ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(mongoClientSettings) + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProvider) + .build() + + then: + options.getKeyVaultMongoClientSettings() == mongoClientSettings + options.getKeyVaultNamespace() == keyVaultNamespace + options.getKmsProviders() == kmsProvider + options.getKmsProviderPropertySuppliers() == [:] + options.getKmsProviderSslContextMap() == [:] + options.getTimeout(TimeUnit.MILLISECONDS) == null + + when: + options = ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(mongoClientSettings) + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProvider) + .kmsProviderPropertySuppliers(kmsProviderSupplier) + .kmsProviderSslContextMap(kmsProviderSslContextMap) + .timeout(1_000, TimeUnit.MILLISECONDS) + .build() + + then: + options.getKeyVaultMongoClientSettings() == mongoClientSettings + options.getKeyVaultNamespace() == keyVaultNamespace + options.getKmsProviders() == kmsProvider + options.getKmsProviderPropertySuppliers() == kmsProviderSupplier + options.getKmsProviderSslContextMap() == kmsProviderSslContextMap + options.getTimeout(TimeUnit.MILLISECONDS) == 1_000 + } + + def 'should throw an exception if the defaultTimeout is set and negative'() { + given: + def builder = ClientEncryptionSettings.builder() + + when: + builder.timeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.timeout(-1, TimeUnit.SECONDS) + + then: + thrown(IllegalArgumentException) + } + +} diff --git a/driver-legacy/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy similarity index 83% rename from driver-legacy/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy rename to driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy index d48199f7b12..98bf163f9e3 100644 --- a/driver-legacy/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy @@ -18,6 +18,8 @@ package com.mongodb import spock.lang.Specification +import java.util.concurrent.TimeUnit + class ClientSessionOptionsSpecification extends Specification { def 'should have correct defaults'() { @@ -45,6 +47,23 @@ class ClientSessionOptionsSpecification extends Specification { transactionOptions << [TransactionOptions.builder().build(), TransactionOptions.builder().readConcern(ReadConcern.LOCAL).build()] } + def 'should throw an exception if the defaultTimeout is set and negative'() { + given: + def builder = ClientSessionOptions.builder() + + when: + builder.defaultTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.defaultTimeout(-1, TimeUnit.SECONDS) + + then: + thrown(IllegalArgumentException) + 
} + def 'should apply options to builder'() { expect: ClientSessionOptions.builder(baseOptions).build() == baseOptions diff --git a/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy index 90f28833ba5..ec5d92b1e49 100644 --- a/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy @@ -50,7 +50,7 @@ class MongoClientSettingsSpecification extends Specification { settings.getReadPreference() == ReadPreference.primary() settings.getCommandListeners().isEmpty() settings.getApplicationName() == null - settings.getLoggerSettings() == LoggerSettings.builder().build(); + settings.getLoggerSettings() == LoggerSettings.builder().build() settings.clusterSettings == ClusterSettings.builder().build() settings.connectionPoolSettings == ConnectionPoolSettings.builder().build() settings.socketSettings == SocketSettings.builder().build() @@ -64,6 +64,7 @@ class MongoClientSettingsSpecification extends Specification { settings.contextProvider == null settings.dnsClient == null settings.inetAddressResolver == null + settings.getTimeout(TimeUnit.MILLISECONDS) == null } @SuppressWarnings('UnnecessaryObjectReferences') @@ -151,6 +152,7 @@ class MongoClientSettingsSpecification extends Specification { .contextProvider(contextProvider) .dnsClient(dnsClient) .inetAddressResolver(inetAddressResolver) + .timeout(1000, TimeUnit.SECONDS) .build() then: @@ -172,6 +174,7 @@ class MongoClientSettingsSpecification extends Specification { settings.getContextProvider() == contextProvider settings.getDnsClient() == dnsClient settings.getInetAddressResolver() == inetAddressResolver + settings.getTimeout(TimeUnit.MILLISECONDS) == 1_000_000 } def 'should be easy to create new settings from existing'() { @@ -213,6 +216,7 @@ class MongoClientSettingsSpecification extends Specification { .contextProvider(contextProvider) .dnsClient(dnsClient) .inetAddressResolver(inetAddressResolver) + .timeout(0, TimeUnit.SECONDS) .build() then: @@ -241,6 +245,30 @@ class MongoClientSettingsSpecification extends Specification { thrown(IllegalArgumentException) } + def 'should throw an exception if the timeout is invalid'() { + given: + def builder = MongoClientSettings.builder() + + when: + builder.timeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.timeout(-1, TimeUnit.SECONDS) + + then: + thrown(IllegalArgumentException) + + when: + def connectionString = new ConnectionString('mongodb://localhost/?timeoutMS=-1') + builder.applyConnectionString(connectionString).build() + + then: + thrown(IllegalStateException) + } + def 'should add command listeners'() { given: CommandListener commandListenerOne = Mock(CommandListener) @@ -308,6 +336,7 @@ class MongoClientSettingsSpecification extends Specification { + '&readConcernLevel=majority' + '&compressors=zlib&zlibCompressionLevel=5' + '&uuidRepresentation=standard' + + '&timeoutMS=10000' + '&proxyHost=proxy.com' + '&proxyPort=1080' + '&proxyUsername=username' @@ -370,6 +399,7 @@ class MongoClientSettingsSpecification extends Specification { .retryWrites(true) .retryReads(true) .uuidRepresentation(UuidRepresentation.STANDARD) + .timeout(10000, TimeUnit.MILLISECONDS) .build() then: @@ -525,7 +555,7 @@ class MongoClientSettingsSpecification extends Specification { 'heartbeatConnectTimeoutMS', 'heartbeatSocketTimeoutMS', 'inetAddressResolver', 
'loggerSettingsBuilder', 'readConcern', 'readPreference', 'retryReads', 'retryWrites', 'serverApi', 'serverSettingsBuilder', 'socketSettingsBuilder', 'sslSettingsBuilder', - 'transportSettings', 'uuidRepresentation', 'writeConcern'] + 'timeoutMS', 'transportSettings', 'uuidRepresentation', 'writeConcern'] then: actual == expected @@ -540,7 +570,8 @@ class MongoClientSettingsSpecification extends Specification { 'applyToSslSettings', 'autoEncryptionSettings', 'build', 'codecRegistry', 'commandListenerList', 'compressorList', 'contextProvider', 'credential', 'dnsClient', 'heartbeatConnectTimeoutMS', 'heartbeatSocketTimeoutMS', 'inetAddressResolver', 'readConcern', 'readPreference', 'retryReads', 'retryWrites', - 'serverApi', 'transportSettings', 'uuidRepresentation', 'writeConcern'] + 'serverApi', 'timeout', 'transportSettings', 'uuidRepresentation', 'writeConcern'] + then: actual == expected } diff --git a/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy index 37e190432ff..5b3f35f42f1 100644 --- a/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy @@ -32,6 +32,24 @@ class TransactionOptionsSpecification extends Specification { options.getMaxCommitTime(TimeUnit.MILLISECONDS) == null } + def 'should throw an exception if the timeout is invalid'() { + given: + def builder = TransactionOptions.builder() + + + when: + builder.timeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.timeout(-1, TimeUnit.SECONDS).build() + + then: + thrown(IllegalArgumentException) + } + def 'should apply options set in builder'() { when: def options = TransactionOptions.builder() @@ -39,6 +57,7 @@ class TransactionOptionsSpecification extends Specification { .writeConcern(WriteConcern.JOURNALED) .readPreference(ReadPreference.secondary()) .maxCommitTime(5, TimeUnit.SECONDS) + .timeout(null, TimeUnit.MILLISECONDS) .build() then: @@ -47,6 +66,7 @@ class TransactionOptionsSpecification extends Specification { options.readPreference == ReadPreference.secondary() options.getMaxCommitTime(TimeUnit.MILLISECONDS) == 5000 options.getMaxCommitTime(TimeUnit.SECONDS) == 5 + options.getTimeout(TimeUnit.MILLISECONDS) == null } def 'should merge'() { @@ -56,12 +76,14 @@ class TransactionOptionsSpecification extends Specification { .writeConcern(WriteConcern.MAJORITY) .readPreference(ReadPreference.secondary()) .maxCommitTime(5, TimeUnit.SECONDS) + .timeout(123, TimeUnit.MILLISECONDS) .build() def third = TransactionOptions.builder() .readConcern(ReadConcern.LOCAL) .writeConcern(WriteConcern.W2) .readPreference(ReadPreference.nearest()) .maxCommitTime(10, TimeUnit.SECONDS) + .timeout(123, TimeUnit.MILLISECONDS) .build() expect: diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java b/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java index ac1d17db549..36e25cb534c 100644 --- a/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java @@ -80,6 +80,7 @@ public void testDefaults() { assertFalse(serverDescription.isSecondary()); assertEquals(0F, serverDescription.getRoundTripTimeNanos(), 0L); + assertEquals(0F, serverDescription.getMinRoundTripTimeNanos(), 0L); assertEquals(0x1000000, 
serverDescription.getMaxDocumentSize()); @@ -92,6 +93,7 @@ public void testDefaults() { assertNull(serverDescription.getSetName()); assertEquals(0, serverDescription.getMinWireVersion()); assertEquals(0, serverDescription.getMaxWireVersion()); + assertFalse(serverDescription.isCryptd()); assertNull(serverDescription.getElectionId()); assertNull(serverDescription.getSetVersion()); assertNull(serverDescription.getTopologyVersion()); @@ -112,6 +114,7 @@ public void testBuilder() { .setName("test") .maxDocumentSize(100) .roundTripTime(50000, java.util.concurrent.TimeUnit.NANOSECONDS) + .minRoundTripTime(10000, java.util.concurrent.TimeUnit.NANOSECONDS) .primary("localhost:27017") .canonicalAddress("localhost:27018") .hosts(new HashSet<>(asList("localhost:27017", @@ -131,6 +134,7 @@ public void testBuilder() { .lastUpdateTimeNanos(40000L) .logicalSessionTimeoutMinutes(30) .exception(exception) + .cryptd(true) .build(); @@ -147,6 +151,7 @@ public void testBuilder() { assertFalse(serverDescription.isSecondary()); assertEquals(50000, serverDescription.getRoundTripTimeNanos(), 0L); + assertEquals(10000, serverDescription.getMinRoundTripTimeNanos(), 0L); assertEquals(100, serverDescription.getMaxDocumentSize()); @@ -168,6 +173,7 @@ public void testBuilder() { assertEquals((Integer) 30, serverDescription.getLogicalSessionTimeoutMinutes()); assertEquals(exception, serverDescription.getException()); assertEquals(serverDescription, builder(serverDescription).build()); + assertTrue(serverDescription.isCryptd()); } @Test @@ -235,6 +241,9 @@ public void testObjectOverrides() { otherDescription = createBuilder().topologyVersion(new TopologyVersion(new ObjectId(), 44)).build(); assertNotEquals(builder.build(), otherDescription); + otherDescription = createBuilder().cryptd(true).build(); + assertNotEquals(builder.build(), otherDescription); + // test exception state changes assertNotEquals(createBuilder().exception(new IOException()).build(), createBuilder().exception(new RuntimeException()).build()); @@ -516,28 +525,4 @@ public void serverWithMaxWireVersionLessThanDriverMinWireVersionShouldBeIncompat assertFalse(serverDescription.isIncompatiblyNewerThanDriver()); assertTrue(serverDescription.isIncompatiblyOlderThanDriver()); } - - private static final ServerDescription SERVER_DESCRIPTION = builder() - .address(new ServerAddress()) - .type(ServerType.SHARD_ROUTER) - .tagSet(new TagSet(singletonList(new Tag("dc", "ny")))) - .setName("test") - .maxDocumentSize(100) - .roundTripTime(50000, TimeUnit.NANOSECONDS) - .primary("localhost:27017") - .canonicalAddress("localhost:27017") - .hosts(new HashSet<>(asList("localhost:27017", "localhost:27018"))) - .passives(new HashSet<>(singletonList("localhost:27019"))) - .arbiters(new HashSet<>(singletonList("localhost:27020"))) - .ok(true) - .state(CONNECTED) - .minWireVersion(1) - .lastWriteDate(new Date()) - .maxWireVersion(2) - .electionId(new ObjectId("abcdabcdabcdabcdabcdabcd")) - .setVersion(2) - .lastUpdateTimeNanos(1) - .lastWriteDate(new Date(42)) - .logicalSessionTimeoutMinutes(25) - .roundTripTime(56, TimeUnit.MILLISECONDS).build(); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java b/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java new file mode 100644 index 00000000000..130d408076e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java @@ -0,0 +1,353 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.session.ClientSession; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mockito; + +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_COMMIT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT; +import static com.mongodb.ClusterFixture.sleep; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +final class TimeoutContextTest { + + public static long getMaxTimeMS(final TimeoutContext timeoutContext) { + long[] result = {0L}; + timeoutContext.runMaxTimeMS((ms) -> result[0] = ms); + return result[0]; + } + + @Test + @DisplayName("test defaults") + void testDefaults() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + assertEquals(0, timeoutContext.getMaxCommitTimeMS()); + assertEquals(0, timeoutContext.getReadTimeoutMS()); + } + + @Test + @DisplayName("Uses timeoutMS if set") + void testUsesTimeoutMSIfSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT); + + assertTrue(timeoutContext.hasTimeoutMS()); + assertTrue(getMaxTimeMS(timeoutContext) > 0); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("infinite timeoutMS") + void testInfiniteTimeoutMS() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT); + + assertTrue(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxTimeMS set") + void testMaxTimeMSSet() { + TimeoutContext timeoutContext = new 
TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_TIME); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(100, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxAwaitTimeMS set") + void testMaxAwaitTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(101, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxTimeMS and MaxAwaitTimeMS set") + void testMaxTimeMSAndMaxAwaitTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(101, getMaxTimeMS(timeoutContext)); + assertEquals(1001, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxCommitTimeMS set") + void testMaxCommitTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_COMMIT); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + assertEquals(999L, timeoutContext.getMaxCommitTimeMS()); + } + + @Test + @DisplayName("All deprecated options set") + void testAllDeprecatedOptionsSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(101, getMaxTimeMS(timeoutContext)); + assertEquals(1001, timeoutContext.getMaxAwaitTimeMS()); + assertEquals(999, timeoutContext.getMaxCommitTimeMS()); + } + + @Test + @DisplayName("Use timeout if available or the alternative") + void testUseTimeoutIfAvailableOrTheAlternative() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + assertEquals(99L, timeoutContext.timeoutOrAlternative(99)); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(0L)); + assertEquals(0L, timeoutContext.timeoutOrAlternative(99)); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L)); + assertTrue(timeoutContext.timeoutOrAlternative(0) <= 999); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L)); + assertTrue(timeoutContext.timeoutOrAlternative(999999) <= 999); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + assertEquals(0, timeoutContext.getMaxCommitTimeMS()); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L)); + assertTrue(timeoutContext.getMaxCommitTimeMS() <= 999); + } + + @Test + @DisplayName("withAdditionalReadTimeout works as expected") + void testWithAdditionalReadTimeout() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(0)); + assertEquals(0L, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS()); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(10_000L)); + assertEquals(10_101L, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS()); + + long originalValue = Long.MAX_VALUE - 100; + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(originalValue)); + assertEquals(Long.MAX_VALUE, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS()); + + assertThrows(AssertionError.class, () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(0L)).withAdditionalReadTimeout(1)); + + assertThrows(AssertionError.class, () -> new 
TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(10_000L)).withAdditionalReadTimeout(1)); + } + + @Test + @DisplayName("Expired works as expected") + void testExpired() { + TimeoutContext smallTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(1L)); + TimeoutContext longTimeout = + new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(9999999L)); + TimeoutContext noTimeout = new TimeoutContext(TIMEOUT_SETTINGS); + sleep(100); + assertFalse(hasExpired(noTimeout.getTimeout())); + assertFalse(hasExpired(longTimeout.getTimeout())); + assertTrue(hasExpired(smallTimeout.getTimeout())); + } + + private static boolean hasExpired(@Nullable final Timeout timeout) { + return Timeout.nullAsInfinite(timeout).call(NANOSECONDS, () -> false, (ns) -> false, () -> true); + } + + @Test + @DisplayName("throws when calculating timeout if expired") + void testThrowsWhenExpired() { + TimeoutContext smallTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(1L)); + TimeoutContext longTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(9999999L)); + TimeoutContext noTimeout = new TimeoutContext(TIMEOUT_SETTINGS); + sleep(100); + + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getReadTimeoutMS); + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getWriteTimeoutMS); + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getConnectTimeoutMs); + assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(smallTimeout)); + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getMaxCommitTimeMS); + assertThrows(MongoOperationTimeoutException.class, () -> smallTimeout.timeoutOrAlternative(1)); + assertDoesNotThrow(longTimeout::getReadTimeoutMS); + assertDoesNotThrow(longTimeout::getWriteTimeoutMS); + assertDoesNotThrow(longTimeout::getConnectTimeoutMs); + assertDoesNotThrow(() -> getMaxTimeMS(longTimeout)); + assertDoesNotThrow(longTimeout::getMaxCommitTimeMS); + assertDoesNotThrow(() -> longTimeout.timeoutOrAlternative(1)); + assertDoesNotThrow(noTimeout::getReadTimeoutMS); + assertDoesNotThrow(noTimeout::getWriteTimeoutMS); + assertDoesNotThrow(noTimeout::getConnectTimeoutMs); + assertDoesNotThrow(() -> getMaxTimeMS(noTimeout)); + assertDoesNotThrow(noTimeout::getMaxCommitTimeMS); + assertDoesNotThrow(() -> noTimeout.timeoutOrAlternative(1)); + } + + @Test + @DisplayName("validates minRoundTripTime for maxTimeMS") + void testValidatedMinRoundTripTime() { + Supplier<TimeoutContext> supplier = () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L)); + + assertTrue(getMaxTimeMS(supplier.get()) <= 100); + assertTrue(getMaxTimeMS(supplier.get().minRoundTripTimeMS(10)) <= 90); + assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(supplier.get().minRoundTripTimeMS(101))); + assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(supplier.get().minRoundTripTimeMS(100))); + } + + @Test + @DisplayName("Test createTimeoutContext handles legacy settings") + void testCreateTimeoutContextLegacy() { + TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS); + + ClientSession clientSession = Mockito.mock(ClientSession.class); + Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext); + + TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings()); + assertEquals(timeoutContext, actualTimeoutContext); + } + + @Test + 
@DisplayName("Test createTimeoutContext with timeout legacy settings") + void testCreateTimeoutContextWithTimeoutLegacy() { + TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS); + + ClientSession clientSession = Mockito.mock(ClientSession.class); + Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext); + + TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings()); + assertEquals(sessionTimeoutContext, actualTimeoutContext); + } + + @Test + @DisplayName("Test createTimeoutContext with timeout") + void testCreateTimeoutContextWithTimeout() { + TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT.withMaxAwaitTimeMS(123)); + + ClientSession clientSession = Mockito.mock(ClientSession.class); + Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext); + + TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings()); + assertEquals(sessionTimeoutContext, actualTimeoutContext); + } + + @Test + @DisplayName("should override maxTimeMS when MaxTimeSupplier is set") + void shouldOverrideMaximeMS() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L).withMaxTimeMS(1)); + + timeoutContext.setMaxTimeOverride(2L); + + assertEquals(2, getMaxTimeMS(timeoutContext)); + } + + @Test + @DisplayName("should reset maxTimeMS to default behaviour") + void shouldResetMaximeMS() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L).withMaxTimeMS(1)); + timeoutContext.setMaxTimeOverride(1L); + + timeoutContext.resetToDefaultMaxTime(); + + assertTrue(getMaxTimeMS(timeoutContext) > 1); + } + + static Stream shouldChooseConnectTimeoutWhenItIsLessThenTimeoutMs() { + return Stream.of( + //connectTimeoutMS, timeoutMS, expected + Arguments.of(500L, 1000L, 500L), + Arguments.of(0L, null, 0L), + Arguments.of(1000L, null, 1000L), + Arguments.of(1000L, 0L, 1000L), + Arguments.of(0L, 0L, 0L) + ); + } + + @ParameterizedTest + @MethodSource + @DisplayName("should choose connectTimeoutMS when connectTimeoutMS is less than timeoutMS") + void shouldChooseConnectTimeoutWhenItIsLessThenTimeoutMs(final Long connectTimeoutMS, + final Long timeoutMS, + final long expected) { + TimeoutContext timeoutContext = new TimeoutContext( + new TimeoutSettings(0, + connectTimeoutMS, + 0, + timeoutMS, + 0)); + + long calculatedTimeoutMS = timeoutContext.getConnectTimeoutMs(); + assertEquals(expected, calculatedTimeoutMS); + } + + + static Stream shouldChooseTimeoutMsWhenItIsLessThenConnectTimeoutMS() { + return Stream.of( + //connectTimeoutMS, timeoutMS, expected + Arguments.of(1000L, 1000L, 999), + Arguments.of(1000L, 500L, 499L), + Arguments.of(0L, 1000L, 999L) + ); + } + + @ParameterizedTest + @MethodSource + @DisplayName("should choose timeoutMS when timeoutMS is less than connectTimeoutMS") + void shouldChooseTimeoutMsWhenItIsLessThenConnectTimeoutMS(final Long connectTimeoutMS, + final Long timeoutMS, + final long expected) { + TimeoutContext timeoutContext = new TimeoutContext( + new TimeoutSettings(0, + connectTimeoutMS, + 0, + timeoutMS, + 0)); + + long calculatedTimeoutMS = timeoutContext.getConnectTimeoutMs(); + assertTrue(expected - 
calculatedTimeoutMS <= 1); + } + + private TimeoutContextTest() { + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java b/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java new file mode 100644 index 00000000000..71f63d32e6d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestFactory; + +import java.util.Collection; + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + +final class TimeoutSettingsTest { + + @TestFactory + Collection timeoutSettingsTest() { + return asList( + dynamicTest("test defaults", () -> { + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS; + assertAll( + () -> assertEquals(30_000, timeoutSettings.getServerSelectionTimeoutMS()), + () -> assertEquals(10_000, timeoutSettings.getConnectTimeoutMS()), + () -> assertEquals(0, timeoutSettings.getReadTimeoutMS()), + () -> assertNull(timeoutSettings.getTimeoutMS()), + () -> assertEquals(0, timeoutSettings.getMaxTimeMS()), + () -> assertEquals(0, timeoutSettings.getMaxAwaitTimeMS()), + () -> assertNull(timeoutSettings.getWTimeoutMS()) + ); + }), + dynamicTest("test overrides", () -> { + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS + .withTimeoutMS(100L) + .withMaxTimeMS(111) + .withMaxAwaitTimeMS(11) + .withMaxCommitMS(999L) + .withReadTimeoutMS(11_000) + .withWTimeoutMS(222L); + assertAll( + () -> assertEquals(30_000, timeoutSettings.getServerSelectionTimeoutMS()), + () -> assertEquals(10_000, timeoutSettings.getConnectTimeoutMS()), + () -> assertEquals(11_000, timeoutSettings.getReadTimeoutMS()), + () -> assertEquals(100, timeoutSettings.getTimeoutMS()), + () -> assertEquals(111, timeoutSettings.getMaxTimeMS()), + () -> assertEquals(11, timeoutSettings.getMaxAwaitTimeMS()), + () -> assertEquals(999, timeoutSettings.getMaxCommitTimeMS()), + () -> assertEquals(222, timeoutSettings.getWTimeoutMS()) + ); + }) + ); + } + + @Test + public void testTimeoutSettingsValidation() { + assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withTimeoutMS(-1L)); + assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withMaxAwaitTimeMS(-1)); + assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withMaxTimeMS(-1)); + assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withTimeoutMS(10L).withMaxAwaitTimeMS(11)); + } + + private TimeoutSettingsTest() { + } +} diff 
--git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java index deb8e4a2e4a..20553fe881a 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java @@ -15,6 +15,8 @@ */ package com.mongodb.internal.async; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import org.junit.jupiter.api.Test; import java.util.function.BiConsumer; @@ -26,7 +28,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; final class AsyncFunctionsTest extends AsyncFunctionsTestAbstract { - + private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0L, 0)); @Test void test1Method() { // the number of expected variations is often: 1 + N methods invoked @@ -684,6 +686,7 @@ void testRetryLoop() { }, (callback) -> { beginAsync().thenRunRetryingWhile( + TIMEOUT_CONTEXT, c -> async(plainTest(0) ? 1 : 2, c), e -> e.getMessage().equals("exception-1") ).finish(callback); diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java index bc071c9a4f4..970d87d33ed 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java @@ -15,11 +15,20 @@ */ package com.mongodb.internal.async.function; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.client.syncadapter.SupplyingCallback; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.function.LoopState.AttachmentKey; import com.mongodb.internal.operation.retry.AttachmentKeys; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertAll; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -28,11 +37,43 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Named.named; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.Mockito.mock; final class RetryStateTest { - @Test - void unlimitedAttemptsAndAdvance() { - RetryState retryState = new RetryState(); + private static final TimeoutContext TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L, + 0L, null, 0L)); + + private static final TimeoutContext TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L, + 0L, 1L, 0L)); + + private static final TimeoutContext TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L, + 0L, 0L, 0L)); + private static final String EXPECTED_TIMEOUT_MESSAGE = "Retry attempt exceeded the timeout limit."; + + static Stream infiniteTimeout() { + return Stream.of( + arguments(named("Infinite timeoutMs", TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT)) + ); + } + + static 
Stream expiredTimeout() { + return Stream.of( + arguments(named("Expired timeoutMs", TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT)) + ); + } + + static Stream noTimeout() { + return Stream.of( + arguments(named("No timeoutMs", TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT)) + ); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void unlimitedAttemptsAndAdvance(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); assertAll( () -> assertTrue(retryState.isFirstAttempt()), () -> assertEquals(0, retryState.attempt()), @@ -57,7 +98,7 @@ void unlimitedAttemptsAndAdvance() { @Test void limitedAttemptsAndAdvance() { - RetryState retryState = new RetryState(0); + RetryState retryState = RetryState.withNonRetryableState(); RuntimeException attemptException = new RuntimeException() { }; assertAll( @@ -75,9 +116,10 @@ void limitedAttemptsAndAdvance() { ); } - @Test - void markAsLastAttemptAdvanceWithRuntimeException() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void markAsLastAttemptAdvanceWithRuntimeException(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); retryState.markAsLastAttempt(); assertTrue(retryState.isLastAttempt()); RuntimeException attemptException = new RuntimeException() { @@ -86,9 +128,10 @@ void markAsLastAttemptAdvanceWithRuntimeException() { () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> fail())); } - @Test - void markAsLastAttemptAdvanceWithError() { - RetryState retryState = new RetryState(); + @ParameterizedTest(name = "should advance with non-retryable error when marked as last attempt and : ''{0}''") + @MethodSource({"infiniteTimeout", "expiredTimeout", "noTimeout"}) + void markAsLastAttemptAdvanceWithError(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); retryState.markAsLastAttempt(); assertTrue(retryState.isLastAttempt()); Error attemptException = new Error() { @@ -97,32 +140,46 @@ void markAsLastAttemptAdvanceWithError() { () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> fail())); } - @Test - void breakAndThrowIfRetryAndFirstAttempt() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndThrowIfRetryAndFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); retryState.breakAndThrowIfRetryAnd(Assertions::fail); assertFalse(retryState.isLastAttempt()); } - @Test - void breakAndThrowIfRetryAndFalse() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndThrowIfRetryAndFalse(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); advance(retryState); retryState.breakAndThrowIfRetryAnd(() -> false); assertFalse(retryState.isLastAttempt()); } - @Test + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) void breakAndThrowIfRetryAndTrue() { - RetryState retryState = new RetryState(); + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT); advance(retryState); assertThrows(RuntimeException.class, () -> retryState.breakAndThrowIfRetryAnd(() -> true)); assertTrue(retryState.isLastAttempt()); } @Test - void breakAndThrowIfRetryIfPredicateThrows() { - RetryState retryState = new RetryState(); + void 
breakAndThrowIfRetryAndTrueWithExpiredTimeout() { + TimeoutContext tContextMock = mock(TimeoutContext.class); + + RetryState retryState = new RetryState(tContextMock); + advance(retryState); + assertThrows(RuntimeException.class, () -> retryState.breakAndThrowIfRetryAnd(() -> true)); + assertTrue(retryState.isLastAttempt()); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndThrowIfRetryIfPredicateThrows(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); advance(retryState); RuntimeException e = new RuntimeException() { }; @@ -132,18 +189,20 @@ void breakAndThrowIfRetryIfPredicateThrows() { assertFalse(retryState.isLastAttempt()); } - @Test - void breakAndCompleteIfRetryAndFirstAttempt() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndCompleteIfRetryAndFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); SupplyingCallback callback = new SupplyingCallback<>(); assertFalse(retryState.breakAndCompleteIfRetryAnd(Assertions::fail, callback)); assertFalse(callback.completed()); assertFalse(retryState.isLastAttempt()); } - @Test - void breakAndCompleteIfRetryAndFalse() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndCompleteIfRetryAndFalse(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); advance(retryState); SupplyingCallback callback = new SupplyingCallback<>(); assertFalse(retryState.breakAndCompleteIfRetryAnd(() -> false, callback)); @@ -151,9 +210,10 @@ void breakAndCompleteIfRetryAndFalse() { assertFalse(retryState.isLastAttempt()); } - @Test - void breakAndCompleteIfRetryAndTrue() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndCompleteIfRetryAndTrue(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); advance(retryState); SupplyingCallback callback = new SupplyingCallback<>(); assertTrue(retryState.breakAndCompleteIfRetryAnd(() -> true, callback)); @@ -161,9 +221,10 @@ void breakAndCompleteIfRetryAndTrue() { assertTrue(retryState.isLastAttempt()); } - @Test - void breakAndCompleteIfRetryAndPredicateThrows() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void breakAndCompleteIfRetryAndPredicateThrows(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); advance(retryState); Error e = new Error() { }; @@ -175,25 +236,89 @@ void breakAndCompleteIfRetryAndPredicateThrows() { assertFalse(retryState.isLastAttempt()); } - @Test - void advanceOrThrowPredicateFalse() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowPredicateFalse(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); RuntimeException attemptException = new RuntimeException() { }; assertThrows(attemptException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> false)); } + @ParameterizedTest + @MethodSource({"infiniteTimeout"}) + @DisplayName("should rethrow detected timeout exception even if timeout in retry state is not expired") + void 
advanceReThrowDetectedTimeoutExceptionEvenIfTimeoutInRetryStateIsNotExpired(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); + + MongoOperationTimeoutException expectedTimeoutException = TimeoutContext.createMongoTimeoutException("Server selection failed"); + MongoOperationTimeoutException actualTimeoutException = + assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException, + (e1, e2) -> expectedTimeoutException, + (rs, e) -> false)); + + Assertions.assertEquals(actualTimeoutException, expectedTimeoutException); + } + + @Test + @DisplayName("should throw timeout exception from retry, when transformer swallows original timeout exception") + void advanceThrowTimeoutExceptionWhenTransformerSwallowOriginalTimeoutException() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT); + RuntimeException previousAttemptException = new RuntimeException() { + }; + MongoOperationTimeoutException expectedTimeoutException = TimeoutContext.createMongoTimeoutException("Server selection failed"); + + retryState.advanceOrThrow(previousAttemptException, + (e1, e2) -> previousAttemptException, + (rs, e) -> true); + + MongoOperationTimeoutException actualTimeoutException = + assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException, + (e1, e2) -> previousAttemptException, + (rs, e) -> false)); + + Assertions.assertNotEquals(actualTimeoutException, expectedTimeoutException); + Assertions.assertEquals(EXPECTED_TIMEOUT_MESSAGE, actualTimeoutException.getMessage()); + Assertions.assertEquals(previousAttemptException, actualTimeoutException.getCause(), + "Retry timeout exception should have a cause if transformer returned non-timeout exception."); + } + + + @Test + @DisplayName("should throw original timeout exception from retry, when transformer returns original timeout exception") + void advanceThrowOriginalTimeoutExceptionWhenTransformerReturnsOriginalTimeoutException() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT); + RuntimeException previousAttemptException = new RuntimeException() { + }; + MongoOperationTimeoutException expectedTimeoutException = TimeoutContext + .createMongoTimeoutException("Server selection failed"); + + retryState.advanceOrThrow(previousAttemptException, + (e1, e2) -> previousAttemptException, + (rs, e) -> true); + + MongoOperationTimeoutException actualTimeoutException = + assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException, + (e1, e2) -> expectedTimeoutException, + (rs, e) -> false)); + + Assertions.assertEquals(actualTimeoutException, expectedTimeoutException); + Assertions.assertNull(actualTimeoutException.getCause(), + "Original timeout exception should not have a cause if transformer already returned timeout exception."); + } + @Test void advanceOrThrowPredicateTrueAndLastAttempt() { - RetryState retryState = new RetryState(0); + RetryState retryState = RetryState.withNonRetryableState(); Error attemptException = new Error() { }; assertThrows(attemptException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> true)); } - @Test - void advanceOrThrowPredicateThrowsAfterFirstAttempt() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowPredicateThrowsAfterFirstAttempt(final TimeoutContext timeoutContext) 
{ + RetryState retryState = new RetryState(timeoutContext); RuntimeException predicateException = new RuntimeException() { }; RuntimeException attemptException = new RuntimeException() { @@ -206,8 +331,26 @@ void advanceOrThrowPredicateThrowsAfterFirstAttempt() { } @Test - void advanceOrThrowPredicateThrows() { - RetryState retryState = new RetryState(); + void advanceOrThrowPredicateThrowsTimeoutAfterFirstAttempt() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT); + RuntimeException predicateException = new RuntimeException() { + }; + RuntimeException attemptException = new MongoOperationTimeoutException(EXPECTED_TIMEOUT_MESSAGE); + MongoOperationTimeoutException mongoOperationTimeoutException = assertThrows(MongoOperationTimeoutException.class, + () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> { + assertTrue(rs.isFirstAttempt()); + assertEquals(attemptException, e); + throw predicateException; + })); + + assertEquals(EXPECTED_TIMEOUT_MESSAGE, mongoOperationTimeoutException.getMessage()); + assertNull(mongoOperationTimeoutException.getCause()); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowPredicateThrows(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); RuntimeException firstAttemptException = new RuntimeException() { }; retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true); @@ -222,9 +365,10 @@ void advanceOrThrowPredicateThrows() { })); } - @Test - void advanceOrThrowTransformerThrowsAfterFirstAttempt() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout", "expiredTimeout"}) + void advanceOrThrowTransformerThrowsAfterFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); RuntimeException transformerException = new RuntimeException() { }; assertThrows(transformerException.getClass(), () -> retryState.advanceOrThrow(new AssertionError(), @@ -234,9 +378,10 @@ void advanceOrThrowTransformerThrowsAfterFirstAttempt() { (rs, e) -> fail())); } - @Test - void advanceOrThrowTransformerThrows() throws Throwable { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) //TODO mock? 
+ void advanceOrThrowTransformerThrows(final TimeoutContext timeoutContext) throws Throwable { + RetryState retryState = new RetryState(timeoutContext); Error firstAttemptException = new Error() { }; retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true); @@ -249,9 +394,10 @@ void advanceOrThrowTransformerThrows() throws Throwable { (rs, e) -> fail())); } - @Test - void advanceOrThrowTransformAfterFirstAttempt() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowTransformAfterFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); RuntimeException attemptException = new RuntimeException() { }; RuntimeException transformerResult = new RuntimeException() { @@ -269,8 +415,32 @@ void advanceOrThrowTransformAfterFirstAttempt() { } @Test - void advanceOrThrowTransform() { - RetryState retryState = new RetryState(); + void advanceOrThrowTransformThrowsTimeoutExceptionAfterFirstAttempt() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT); + + RuntimeException attemptException = new MongoOperationTimeoutException(EXPECTED_TIMEOUT_MESSAGE); + RuntimeException transformerResult = new RuntimeException(); + + MongoOperationTimeoutException mongoOperationTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> retryState.advanceOrThrow(attemptException, + (e1, e2) -> { + assertNull(e1); + assertEquals(attemptException, e2); + return transformerResult; + }, + (rs, e) -> { + assertEquals(attemptException, e); + return false; + })); + + assertEquals(EXPECTED_TIMEOUT_MESSAGE, mongoOperationTimeoutException.getMessage()); + assertEquals(transformerResult, mongoOperationTimeoutException.getCause()); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowTransform(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); RuntimeException firstAttemptException = new RuntimeException() { }; retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true); @@ -290,9 +460,10 @@ void advanceOrThrowTransform() { })); } - @Test - void attachAndAttachment() { - RetryState retryState = new RetryState(); + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void attachAndAttachment(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); AttachmentKey attachmentKey = AttachmentKeys.maxWireVersion(); int attachmentValue = 1; assertFalse(retryState.attachment(attachmentKey).isPresent()); diff --git a/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy index 7cbd37bb862..824a724ee81 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy @@ -18,17 +18,16 @@ package com.mongodb.internal.binding import com.mongodb.ReadPreference import com.mongodb.ServerAddress -import com.mongodb.ServerApi -import com.mongodb.ServerApiVersion import com.mongodb.connection.ServerConnectionState import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerType -import com.mongodb.internal.IgnorableRequestContext import com.mongodb.internal.connection.Cluster 
import com.mongodb.internal.connection.Server import com.mongodb.internal.connection.ServerTuple import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + class SingleServerBindingSpecification extends Specification { def 'should implement getters'() { @@ -42,26 +41,22 @@ class SingleServerBindingSpecification extends Specification { .build()) } def address = new ServerAddress() - def serverApi = ServerApi.builder().version(ServerApiVersion.V1).build() + def operationContext = OPERATION_CONTEXT when: - def binding = new SingleServerBinding(cluster, address, serverApi, IgnorableRequestContext.INSTANCE) + + def binding = new SingleServerBinding(cluster, address, operationContext) then: binding.readPreference == ReadPreference.primary() - binding.serverApi == serverApi - - when: - def source = binding.getReadConnectionSource() + binding.getOperationContext() == operationContext - then: - source.serverApi == serverApi when: - source = binding.getWriteConnectionSource() + def source = binding.getReadConnectionSource() then: - source.serverApi == serverApi + source.getOperationContext() == operationContext } def 'should increment and decrement reference counts'() { @@ -77,7 +72,7 @@ class SingleServerBindingSpecification extends Specification { def address = new ServerAddress() when: - def binding = new SingleServerBinding(cluster, address, null, IgnorableRequestContext.INSTANCE) + def binding = new SingleServerBinding(cluster, address, OPERATION_CONTEXT) then: binding.count == 1 diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java index cc2aa11f74a..5b2cb1ab5f6 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java @@ -80,9 +80,12 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; import static com.mongodb.assertions.Assertions.assertFalse; import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.Arrays.asList; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -138,22 +141,22 @@ public void setUp() { settingsBuilder.minSize(poolOptions.getNumber("minPoolSize").intValue()); } if (poolOptions.containsKey("maxIdleTimeMS")) { - settingsBuilder.maxConnectionIdleTime(poolOptions.getNumber("maxIdleTimeMS").intValue(), TimeUnit.MILLISECONDS); + settingsBuilder.maxConnectionIdleTime(poolOptions.getNumber("maxIdleTimeMS").intValue(), MILLISECONDS); } if (poolOptions.containsKey("waitQueueTimeoutMS")) { - settingsBuilder.maxWaitTime(poolOptions.getNumber("waitQueueTimeoutMS").intValue(), TimeUnit.MILLISECONDS); + settingsBuilder.maxWaitTime(poolOptions.getNumber("waitQueueTimeoutMS").intValue(), MILLISECONDS); } if (poolOptions.containsKey("backgroundThreadIntervalMS")) { long intervalMillis = poolOptions.getNumber("backgroundThreadIntervalMS").longValue(); assertFalse(intervalMillis == 0); if (intervalMillis < 0) { - settingsBuilder.maintenanceInitialDelay(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + 
settingsBuilder.maintenanceInitialDelay(Long.MAX_VALUE, MILLISECONDS); } else { /* Using frequency/period instead of an interval as required by the specification is incorrect, for example, * because it opens up a possibility to run the background thread non-stop if runs are as long as or longer than the period. * Nevertheless, I am reusing what we already have in the driver instead of clogging up the implementation. */ settingsBuilder.maintenanceFrequency( - poolOptions.getNumber("backgroundThreadIntervalMS").longValue(), TimeUnit.MILLISECONDS); + poolOptions.getNumber("backgroundThreadIntervalMS").longValue(), MILLISECONDS); } } if (poolOptions.containsKey("maxConnecting")) { @@ -171,7 +174,7 @@ public void setUp() { case UNIT: { ServerId serverId = new ServerId(new ClusterId(), new ServerAddress("host1")); pool = new DefaultConnectionPool(serverId, new TestInternalConnectionFactory(), settings, internalSettings, - SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class))); + SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class)), OPERATION_CONTEXT_FACTORY); break; } case INTEGRATION: { @@ -190,7 +193,7 @@ public void setUp() { new TestCommandListener(), ClusterFixture.getServerApi() ), - settings, internalSettings, sdamProvider)); + settings, internalSettings, sdamProvider, OPERATION_CONTEXT_FACTORY)); sdamProvider.initialize(new DefaultSdamServerDescriptionManager(mockedCluster(), serverId, mock(ServerListener.class), mock(ServerMonitor.class), pool, connectionMode)); setFailPoint(); @@ -244,7 +247,7 @@ public void shouldPassAllOutcomes() throws Exception { assumeNotNull(eventClass); long timeoutMillis = operation.getNumber("timeout", new BsonInt64(TimeUnit.SECONDS.toMillis(5))) .longValue(); - listener.waitForEvent(eventClass, operation.getNumber("count").intValue(), timeoutMillis, TimeUnit.MILLISECONDS); + listener.waitForEvent(eventClass, operation.getNumber("count").intValue(), timeoutMillis, MILLISECONDS); } else if (name.equals("clear")) { pool.invalidate(null); } else if (name.equals("ready")) { @@ -383,6 +386,10 @@ private void assertReasonMatch(final BsonDocument expectedEvent, final Connectio } } + protected OperationContext createOperationContext() { + return ClusterFixture.createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(settings.getMaxWaitTime(MILLISECONDS))); + } + private void assertReasonMatch(final BsonDocument expectedEvent, final ConnectionCheckOutFailedEvent connectionCheckOutFailedEvent) { if (!expectedEvent.containsKey("reason")) { return; @@ -528,7 +535,8 @@ private Event getNextEvent(final Iterator eventsIterator, final } private static void executeAdminCommand(final BsonDocument command) { - new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(ClusterFixture.getBinding()); + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) + .execute(ClusterFixture.getBinding()); } private void setFailPoint() { @@ -624,13 +632,6 @@ public InternalConnection get(final OperationContext operationContext) { return result; } - @Override - public InternalConnection get(final OperationContext operationContext, final long timeout, final TimeUnit timeUnit) { - InternalConnection result = pool.get(new OperationContext(), timeout, timeUnit); - updateConnectionIdLocalValueAdjustment(result); - return result; - } - @Override public void getAsync(final OperationContext operationContext, final SingleResultCallback callback) { pool.getAsync(operationContext, (result, problem) -> { diff --git 
a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java index c0924c3d74d..6fe76d0198a 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java @@ -27,7 +27,9 @@ import com.mongodb.connection.ServerDescription; import com.mongodb.connection.ServerType; import com.mongodb.event.ClusterListener; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue; +import com.mongodb.internal.time.Timeout; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonInt32; @@ -42,6 +44,8 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; import static com.mongodb.connection.ServerConnectionState.CONNECTING; import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription; import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException; @@ -73,26 +77,29 @@ protected void applyResponse(final BsonArray response) { if (helloResult.isEmpty()) { serverDescription = ServerDescription.builder().type(ServerType.UNKNOWN).state(CONNECTING).address(serverAddress).build(); } else { - serverDescription = createServerDescription(serverAddress, helloResult, 5000000); + serverDescription = createServerDescription(serverAddress, helloResult, 5000000, 0); } factory.sendNotification(serverAddress, serverDescription); } protected void applyApplicationError(final BsonDocument applicationError) { + Timeout serverSelectionTimeout = OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(); ServerAddress serverAddress = new ServerAddress(applicationError.getString("address").getValue()); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); int errorGeneration = applicationError.getNumber("generation", - new BsonInt32(((DefaultServer) getCluster().getServersSnapshot().getServer(serverAddress)) + new BsonInt32(((DefaultServer) getCluster().getServersSnapshot(serverSelectionTimeout, timeoutContext).getServer(serverAddress)) .getConnectionPool().getGeneration())).intValue(); int maxWireVersion = applicationError.getNumber("maxWireVersion").intValue(); String when = applicationError.getString("when").getValue(); String type = applicationError.getString("type").getValue(); - DefaultServer server = (DefaultServer) cluster.getServersSnapshot().getServer(serverAddress); + DefaultServer server = (DefaultServer) cluster.getServersSnapshot(serverSelectionTimeout, timeoutContext).getServer(serverAddress); RuntimeException exception; switch (type) { case "command": - exception = getCommandFailureException(applicationError.getDocument("response"), serverAddress); + exception = getCommandFailureException(applicationError.getDocument("response"), serverAddress, + OPERATION_CONTEXT.getTimeoutContext()); break; case "network": exception = new MongoSocketReadException("Read error", serverAddress, new IOException()); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy index 0f51bab44a8..50f78639168 
100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy @@ -31,14 +31,19 @@ import com.mongodb.connection.ServerConnectionState import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerType import com.mongodb.event.ServerDescriptionChangedEvent +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.selector.ReadPreferenceServerSelector import com.mongodb.internal.selector.ServerAddressSelector import com.mongodb.internal.selector.WritableServerSelector +import com.mongodb.internal.time.Timeout import spock.lang.Specification import util.spock.annotations.Slow import java.util.concurrent.CountDownLatch +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.createOperationContext import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterSettings.builder import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY @@ -61,7 +66,6 @@ class BaseClusterSpecification extends Specification { given: def clusterSettings = builder().mode(MULTIPLE) .hosts([firstServer, secondServer, thirdServer]) - .serverSelectionTimeout(1, MILLISECONDS) .serverSelector(new ServerAddressSelector(firstServer)) .build() def cluster = new BaseCluster(new ClusterId(), clusterSettings, factory) { @@ -70,7 +74,7 @@ class BaseClusterSpecification extends Specification { } @Override - Cluster.ServersSnapshot getServersSnapshot() { + Cluster.ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) { Cluster.ServersSnapshot result = { serverAddress -> throw new UnsupportedOperationException() } @@ -87,7 +91,17 @@ class BaseClusterSpecification extends Specification { factory.getSettings()) when: 'a server is selected before initialization' - cluster.selectServer({ def clusterDescription -> [] }, new OperationContext()) + cluster.selectServer({ def clusterDescription -> [] }, + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(1))) + + then: 'a MongoTimeoutException is thrown' + thrown(MongoTimeoutException) + + when: 'a server is selected before initialization and timeoutMS is set' + cluster.selectServer({ def clusterDescription -> [] }, + createOperationContext(TIMEOUT_SETTINGS + .withServerSelectionTimeoutMS(1) + .withTimeout(1, MILLISECONDS))) then: 'a MongoTimeoutException is thrown' thrown(MongoTimeoutException) @@ -120,7 +134,7 @@ class BaseClusterSpecification extends Specification { factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers) expect: - cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), new OperationContext()) + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), OPERATION_CONTEXT) .serverDescription.address == firstServer } @@ -128,7 +142,6 @@ class BaseClusterSpecification extends Specification { given: def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) - .serverSelectionTimeout(1, SECONDS) .hosts([firstServer, secondServer, thirdServer]) .build(), factory) @@ -137,7 +150,9 @@ class BaseClusterSpecification extends Specification { factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers) expect: - cluster.selectServer(new ServerAddressSelector(firstServer), new 
OperationContext()).serverDescription.address == firstServer + cluster.selectServer(new ServerAddressSelector(firstServer), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(1_000))) + .serverDescription.address == firstServer } def 'should apply local threshold when custom server selector is present'() { @@ -155,7 +170,7 @@ class BaseClusterSpecification extends Specification { factory.sendNotification(thirdServer, 1, REPLICA_SET_PRIMARY, allServers) expect: - cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.nearest()), new OperationContext()) + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.nearest()), OPERATION_CONTEXT) .serverDescription.address == firstServer } @@ -173,7 +188,7 @@ class BaseClusterSpecification extends Specification { factory.sendNotification(thirdServer, 1, REPLICA_SET_PRIMARY, allServers) expect: // firstServer is the only secondary within the latency threshold - cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), new OperationContext()) + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), OPERATION_CONTEXT) .serverDescription.address == firstServer } @@ -182,7 +197,6 @@ class BaseClusterSpecification extends Specification { def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) .hosts([firstServer, secondServer]) - .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS) .build(), factory) @@ -193,10 +207,12 @@ class BaseClusterSpecification extends Specification { .exception(new MongoInternalException('oops')) .build()) - cluster.selectServer(new WritableServerSelector(), new OperationContext()) + cluster.selectServer(new WritableServerSelector(), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS))) then: def e = thrown(MongoTimeoutException) + e.getMessage().startsWith("Timed out while waiting for a server " + 'that matches WritableServerSelector. 
Client view of cluster state is {type=UNKNOWN') e.getMessage().contains('{address=localhost:27017, type=UNKNOWN, state=CONNECTING, ' + @@ -212,7 +228,6 @@ class BaseClusterSpecification extends Specification { def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) .hosts([firstServer, secondServer, thirdServer]) - .serverSelectionTimeout(serverSelectionTimeoutMS, SECONDS) .build(), factory) factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers) @@ -220,7 +235,8 @@ class BaseClusterSpecification extends Specification { factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers) expect: - cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), new OperationContext()) + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS))) .serverDescription.address == thirdServer cleanup: @@ -236,7 +252,6 @@ class BaseClusterSpecification extends Specification { def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) .hosts([firstServer, secondServer, thirdServer]) - .serverSelectionTimeout(-1, SECONDS) .build(), factory) @@ -244,7 +259,8 @@ class BaseClusterSpecification extends Specification { def latch = new CountDownLatch(1) def thread = new Thread({ try { - cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), new OperationContext()) + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(-1_000))) } catch (MongoInterruptedException e) { latch.countDown() } @@ -265,14 +281,13 @@ class BaseClusterSpecification extends Specification { given: def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) - .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS) .hosts([firstServer, secondServer, thirdServer]) .build(), factory) factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers) when: - def serverDescription = selectServerAsync(cluster, firstServer).getDescription() + def serverDescription = selectServerAsync(cluster, firstServer, serverSelectionTimeoutMS).getDescription() then: serverDescription.address == firstServer @@ -288,14 +303,13 @@ class BaseClusterSpecification extends Specification { given: def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) - .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS) .hosts([firstServer, secondServer, thirdServer]) .build(), factory) when: - def secondServerLatch = selectServerAsync(cluster, secondServer) - def thirdServerLatch = selectServerAsync(cluster, thirdServer) + def secondServerLatch = selectServerAsync(cluster, secondServer, serverSelectionTimeoutMS) + def thirdServerLatch = selectServerAsync(cluster, thirdServer, serverSelectionTimeoutMS) factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, allServers) factory.sendNotification(thirdServer, REPLICA_SET_SECONDARY, allServers) @@ -335,12 +349,11 @@ class BaseClusterSpecification extends Specification { def cluster = new MultiServerCluster(new ClusterId(), builder().mode(MULTIPLE) .hosts([firstServer, secondServer, thirdServer]) - .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS) .build(), factory) when: - selectServerAsyncAndGet(cluster, firstServer) + selectServerAsyncAndGet(cluster, firstServer, serverSelectionTimeoutMS) then: thrown(MongoTimeoutException) 
@@ -354,12 +367,21 @@ class BaseClusterSpecification extends Specification { } def selectServerAsyncAndGet(BaseCluster cluster, ServerAddress serverAddress) { - selectServerAsync(cluster, serverAddress).get() + selectServerAsync(cluster, serverAddress, 1_000) + } + + def selectServerAsyncAndGet(BaseCluster cluster, ServerAddress serverAddress, long serverSelectionTimeoutMS) { + selectServerAsync(cluster, serverAddress, serverSelectionTimeoutMS).get() } def selectServerAsync(BaseCluster cluster, ServerAddress serverAddress) { + selectServerAsync(cluster, serverAddress, 1_000) + } + + def selectServerAsync(BaseCluster cluster, ServerAddress serverAddress, long serverSelectionTimeoutMS) { def serverLatch = new ServerLatch() - cluster.selectServerAsync(new ServerAddressSelector(serverAddress), new OperationContext()) { + cluster.selectServerAsync(new ServerAddressSelector(serverAddress), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS))) { ServerTuple result, MongoException e -> serverLatch.server = result != null ? result.getServer() : null serverLatch.serverDescription = result != null ? result.serverDescription : null diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java index 641f814a6dd..1cba6d91c3c 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java @@ -15,6 +15,7 @@ */ package com.mongodb.internal.connection; +import com.mongodb.ClusterFixture; import com.mongodb.ServerAddress; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ClusterDescription; @@ -47,7 +48,7 @@ void selectServerToleratesWhenThereIsNoServerForTheSelectedAddress() { new ServerAddressSelector(serverAddressA), clusterDescriptionAB, serversSnapshotB, - new OperationContext().getServerDeprioritization(), + ClusterFixture.OPERATION_CONTEXT.getServerDeprioritization(), ClusterSettings.builder().build())); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java new file mode 100644 index 00000000000..f7873379c3b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoCommandException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.ServerApiVersion; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import org.bson.BsonDocument; +import org.bson.codecs.Decoder; +import org.junit.jupiter.api.Test; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.connection.ClusterConnectionMode.SINGLE; +import static com.mongodb.internal.connection.CommandHelper.executeCommand; +import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync; +import static com.mongodb.internal.connection.CommandHelper.executeCommandWithoutCheckingForFailure; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class CommandHelperTest { + + static final BsonDocument COMMAND = BsonDocument.parse("{ping: 1}"); + static final BsonDocument OK = BsonDocument.parse("{ok: 1}"); + static final BsonDocument NOT_OK = BsonDocument.parse("{ok: 0, errmsg: 'error'}"); + + static final ConnectionDescription CONNECTION_DESCRIPTION = new ConnectionDescription( + new ServerId(new ClusterId("cluster"), new ServerAddress())); + + @Test + @SuppressWarnings("unchecked") + void testExecuteCommand() { + InternalConnection internalConnection = mock(InternalConnection.class); + ServerDescription serverDescription = mock(ServerDescription.class); + OperationContext operationContext = createOperationContext(); + + + when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION); + when(internalConnection.sendAndReceive(any(), any(), any())).thenReturn(OK); + when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription); + + assertEquals(OK, + executeCommand("admin", COMMAND, SINGLE, operationContext.getServerApi(), internalConnection, operationContext)); + + verify(internalConnection).sendAndReceive(any(CommandMessage.class), any(Decoder.class), eq(operationContext)); + } + + @Test + @SuppressWarnings("unchecked") + void testExecuteCommandWithoutCheckingForFailure() { + InternalConnection internalConnection = mock(InternalConnection.class); + ServerDescription serverDescription = mock(ServerDescription.class); + OperationContext operationContext = createOperationContext(); + + when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION); + when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription); + when(internalConnection.sendAndReceive(any(), any(), any())) + .thenThrow(new MongoCommandException(NOT_OK, new ServerAddress())); + + assertEquals(new BsonDocument(), + executeCommandWithoutCheckingForFailure("admin", COMMAND, SINGLE, operationContext.getServerApi(), + internalConnection, operationContext)); + + verify(internalConnection).sendAndReceive(any(CommandMessage.class), any(Decoder.class), eq(operationContext)); + } + + + @Test + @SuppressWarnings("unchecked") + void 
testExecuteCommandAsyncUsesTheOperationContext() { + InternalConnection internalConnection = mock(InternalConnection.class); + OperationContext operationContext = createOperationContext(); + ServerDescription serverDescription = mock(ServerDescription.class); + + when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription); + when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION); + when(internalConnection.sendAndReceive(any(), any(), any())).thenReturn(OK); + + executeCommandAsync("admin", COMMAND, SINGLE, operationContext.getServerApi(), internalConnection, operationContext, + (r, t) -> {}); + + verify(internalConnection).sendAndReceiveAsync(any(CommandMessage.class), any(Decoder.class), eq(operationContext), any()); + } + + @Test + void testIsCommandOk() { + assertTrue(CommandHelper.isCommandOk(OK)); + assertTrue(CommandHelper.isCommandOk(BsonDocument.parse("{ok: true}"))); + assertFalse(CommandHelper.isCommandOk(NOT_OK)); + assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: false}"))); + assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: 11}"))); + assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: 'nope'}"))); + assertFalse(CommandHelper.isCommandOk(new BsonDocument())); + } + + + OperationContext createOperationContext() { + return new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(TimeoutSettings.DEFAULT), ServerApi.builder().version(ServerApiVersion.V1).build()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy index edc6e92c30e..427fe23c613 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy @@ -22,6 +22,8 @@ import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.connection.ClusterConnectionMode import com.mongodb.connection.ServerType +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.bulk.InsertRequest import com.mongodb.internal.bulk.WriteRequestWithIndex import com.mongodb.internal.session.SessionContext @@ -63,7 +65,7 @@ class CommandMessageSpecification extends Specification { def output = new BasicOutputBuffer() when: - message.encode(output, sessionContext) + message.encode(output, operationContext) then: def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) @@ -77,11 +79,11 @@ class CommandMessageSpecification extends Specification { def expectedCommandDocument = command.clone() .append('$db', new BsonString(namespace.databaseName)) - if (sessionContext.clusterTime != null) { - expectedCommandDocument.append('$clusterTime', sessionContext.clusterTime) + if (operationContext.getSessionContext().clusterTime != null) { + expectedCommandDocument.append('$clusterTime', operationContext.getSessionContext().clusterTime) } - if (sessionContext.hasSession() && responseExpected) { - expectedCommandDocument.append('lsid', sessionContext.sessionId) + if (operationContext.getSessionContext().hasSession() && responseExpected) { + expectedCommandDocument.append('lsid', operationContext.getSessionContext().sessionId) } if (readPreference != ReadPreference.primary()) { @@ -92,35 +94,44 @@ class CommandMessageSpecification extends 
Specification { getCommandDocument(byteBuf, replyHeader) == expectedCommandDocument where: - [readPreference, serverType, clusterConnectionMode, sessionContext, responseExpected] << [ + [readPreference, serverType, clusterConnectionMode, operationContext, responseExpected, isCryptd] << [ [ReadPreference.primary(), ReadPreference.secondary()], [ServerType.REPLICA_SET_PRIMARY, ServerType.SHARD_ROUTER], [ClusterConnectionMode.SINGLE, ClusterConnectionMode.MULTIPLE], [ - Stub(SessionContext) { - hasSession() >> false - getClusterTime() >> null - getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) - getReadConcern() >> ReadConcern.DEFAULT - }, - Stub(SessionContext) { - hasSession() >> false - getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) - getReadConcern() >> ReadConcern.DEFAULT - }, - Stub(SessionContext) { - hasSession() >> true - getClusterTime() >> null - getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) - getReadConcern() >> ReadConcern.DEFAULT - }, - Stub(SessionContext) { - hasSession() >> true - getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) - getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) - getReadConcern() >> ReadConcern.DEFAULT - } + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> false + getClusterTime() >> null + getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null), + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> false + getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null), + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> true + getClusterTime() >> null + getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null), + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> true + getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) + getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null) ], + [true, false], [true, false] ].combinations() } @@ -141,7 +152,8 @@ class CommandMessageSpecification extends Specification { MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, payload, new NoOpFieldNameValidator(), ClusterConnectionMode.MULTIPLE, null) def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) - message.encode(output, NoOpSessionContext.INSTANCE) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) when: def commandDocument = message.getCommandDocument(output) @@ -190,7 +202,8 @@ class CommandMessageSpecification extends Specification { } when: - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), null)) def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) def messageHeader = new MessageHeader(byteBuf, maxMessageSize) @@ -208,7 +221,7 @@ class CommandMessageSpecification extends 
Specification { message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null)) byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) messageHeader = new MessageHeader(byteBuf, maxMessageSize) @@ -226,7 +239,7 @@ class CommandMessageSpecification extends Specification { message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null)) byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) messageHeader = new MessageHeader(byteBuf, maxMessageSize) @@ -244,7 +257,10 @@ class CommandMessageSpecification extends Specification { message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, + sessionContext, + Stub(TimeoutContext), + null)) byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) messageHeader = new MessageHeader(byteBuf, maxMessageSize) @@ -273,7 +289,9 @@ class CommandMessageSpecification extends Specification { } when: - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), + null)) def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) def messageHeader = new MessageHeader(byteBuf, 2048) @@ -291,7 +309,8 @@ class CommandMessageSpecification extends Specification { message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), null)) byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) messageHeader = new MessageHeader(byteBuf, 1024) @@ -318,7 +337,8 @@ class CommandMessageSpecification extends Specification { } when: - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), null)) then: thrown(BsonMaximumSizeExceededException) @@ -338,7 +358,8 @@ class CommandMessageSpecification extends Specification { } when: - message.encode(output, sessionContext) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), null)) then: thrown(MongoClientException) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java new file mode 100644 index 00000000000..f08086be5e8 --- /dev/null +++ 
b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.FieldNameValidator; +import org.bson.io.BasicOutputBuffer; +import org.junit.jupiter.api.Test; + +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +class CommandMessageTest { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("db.test"); + private static final BsonDocument COMMAND = new BsonDocument("find", new BsonString(NAMESPACE.getCollectionName())); + private static final FieldNameValidator FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); + + @Test + void encodeShouldThrowTimeoutExceptionWhenTimeoutContextIsCalled() { + //given + CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, FIELD_NAME_VALIDATOR, ReadPreference.primary(), + MessageSettings.builder() + .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION) + .serverType(ServerType.REPLICA_SET_SECONDARY) + .sessionSupported(true) + .build(), + true, null, null, ClusterConnectionMode.MULTIPLE, null); + + BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); + SessionContext sessionContext = mock(SessionContext.class); + TimeoutContext timeoutContext = mock(TimeoutContext.class, mock -> { + doThrow(new MongoOperationTimeoutException("test")).when(mock).runMaxTimeMS(any()); + }); + OperationContext operationContext = mock(OperationContext.class, mock -> { + when(mock.getSessionContext()).thenReturn(sessionContext); + when(mock.getTimeoutContext()).thenReturn(timeoutContext); + }); + + //when & then + assertThrows(MongoOperationTimeoutException.class, () -> + commandMessage.encode(bsonOutput, operationContext)); + } + + @Test + void encodeShouldNotAddExtraElementsFromTimeoutContextWhenConnectedToMongoCrypt() { + //given + CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, FIELD_NAME_VALIDATOR, ReadPreference.primary(), + MessageSettings.builder() + .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION) + .serverType(ServerType.REPLICA_SET_SECONDARY) + .sessionSupported(true) 
+ .cryptd(true) + .build(), + true, null, null, ClusterConnectionMode.MULTIPLE, null); + + BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); + SessionContext sessionContext = mock(SessionContext.class, mock -> { + when(mock.getClusterTime()).thenReturn(new BsonDocument("clusterTime", new BsonTimestamp(42, 1))); + when(mock.hasSession()).thenReturn(false); + when(mock.getReadConcern()).thenReturn(ReadConcern.DEFAULT); + when(mock.notifyMessageSent()).thenReturn(true); + when(mock.hasActiveTransaction()).thenReturn(false); + when(mock.isSnapshot()).thenReturn(false); + }); + TimeoutContext timeoutContext = mock(TimeoutContext.class); + OperationContext operationContext = mock(OperationContext.class, mock -> { + when(mock.getSessionContext()).thenReturn(sessionContext); + when(mock.getTimeoutContext()).thenReturn(timeoutContext); + }); + + //when + commandMessage.encode(bsonOutput, operationContext); + + //then + verifyNoInteractions(timeoutContext); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java index b8574081f5c..1006b10665b 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java @@ -45,7 +45,7 @@ protected Callable createCallable(final BsonDocument operation) { FutureResultCallback callback = new FutureResultCallback<>(); return () -> { try { - getPool().getAsync(new OperationContext(), (connection, t) -> { + getPool().getAsync(createOperationContext(), (connection, t) -> { if (t != null) { callback.onResult(null, t); } else { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java index b5b449c755d..425a5da0fcb 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java @@ -43,7 +43,7 @@ protected Callable createCallable(final BsonDocument operation) { if (name.equals("checkOut")) { return () -> { try { - InternalConnection connection = getPool().get(new OperationContext()); + InternalConnection connection = getPool().get(createOperationContext()); if (operation.containsKey("label")) { getConnectionMap().put(operation.getString("label").getValue(), connection); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy index ecbdb2c55ab..fe251d34311 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy @@ -16,6 +16,7 @@ package com.mongodb.internal.connection +import com.mongodb.ClusterFixture import com.mongodb.MongoConnectionPoolClearedException import com.mongodb.MongoServerUnavailableException import com.mongodb.MongoTimeoutException @@ -26,7 +27,6 @@ import com.mongodb.connection.ConnectionId import com.mongodb.connection.ServerId import com.mongodb.event.ConnectionCheckOutFailedEvent import com.mongodb.event.ConnectionPoolListener -import com.mongodb.internal.async.SingleResultCallback import com.mongodb.internal.inject.EmptyProvider 
import com.mongodb.internal.inject.SameObjectProvider import com.mongodb.internal.logging.LogMessage @@ -41,6 +41,10 @@ import java.util.concurrent.CountDownLatch import java.util.regex.Matcher import java.util.regex.Pattern +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.createOperationContext import static com.mongodb.connection.ConnectionPoolSettings.builder import static java.util.concurrent.TimeUnit.MILLISECONDS import static java.util.concurrent.TimeUnit.MINUTES @@ -70,22 +74,22 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should get non null connection'() throws InterruptedException { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() expect: - pool.get(new OperationContext()) != null + pool.get(OPERATION_CONTEXT) != null } def 'should reuse released connection'() throws InterruptedException { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: - pool.get(new OperationContext()).close() - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT).close() + pool.get(OPERATION_CONTEXT) then: 1 * connectionFactory.create(SERVER_ID, _) @@ -94,11 +98,11 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should release a connection back into the pool on close, not close the underlying connection'() throws InterruptedException { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: - pool.get(new OperationContext()).close() + pool.get(OPERATION_CONTEXT).close() then: !connectionFactory.getCreatedConnections().get(0).isClosed() @@ -107,17 +111,17 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should throw if pool is exhausted'() throws InterruptedException { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).maxWaitTime(1, MILLISECONDS).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: - def first = pool.get(new OperationContext()) + def first = pool.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50))) then: first != null when: - pool.get(new OperationContext()) + pool.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50))) then: thrown(MongoTimeoutException) @@ -126,12 +130,14 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should throw on timeout'() throws InterruptedException { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).maxWaitTime(50, MILLISECONDS).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() - pool.get(new OperationContext()) + + def timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(50) + pool.get(createOperationContext(timeoutSettings)) when: - TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool) + 
TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool, timeoutSettings) new Thread(connectionGetter).start() connectionGetter.latch.await() @@ -143,7 +149,7 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should have size of 0 with default settings'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(10).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider()) + builder().maxSize(10).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: @@ -157,7 +163,8 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should ensure min pool size after maintenance task runs'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(10).minSize(5).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider()) + builder().maxSize(10).minSize(5).maintenanceInitialDelay(5, MINUTES).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: 'the maintenance tasks runs' @@ -187,7 +194,7 @@ class DefaultConnectionPoolSpecification extends Specification { def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build() when: - pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider()) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) then: 1 * listener.connectionPoolCreated { it.serverId == SERVER_ID && it.settings == settings } @@ -197,7 +204,7 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build() - pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider()) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: pool.close() @@ -209,11 +216,11 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: pool.ready() - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) then: 1 * listener.connectionCreated { it.connectionId.serverId == SERVER_ID } @@ -234,7 +241,7 @@ class DefaultConnectionPoolSpecification extends Specification { connection.opened() >> false when: 'connection pool is created' - pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider()) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) then: '"pool is created" log message is emitted' def poolCreatedLogMessage = getMessage("Connection pool created") "Connection pool created for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()} using options " + @@ -250,7 +257,7 @@ class DefaultConnectionPoolSpecification extends Specification { "Connection pool ready for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}" == poolReadyLogMessage when: 'connection is created' - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) then: '"connection created" and "connection ready" log messages 
are emitted' def createdLogMessage = getMessage( "Connection created") def readyLogMessage = getMessage("Connection ready") @@ -260,7 +267,7 @@ class DefaultConnectionPoolSpecification extends Specification { ", driver-generated ID=${driverConnectionId}, established in=\\d+ ms" when: 'connection is released back into the pool on close' - pool.get(new OperationContext()).close() + pool.get(OPERATION_CONTEXT).close() then: '"connection check out" and "connection checked in" log messages are emitted' def checkoutStartedMessage = getMessage("Connection checkout started") def connectionCheckedInMessage = getMessage("Connection checked in") @@ -295,7 +302,7 @@ class DefaultConnectionPoolSpecification extends Specification { "Connection pool closed for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}" == poolClosedLogMessage when: 'connection checked out on closed pool' - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) then: thrown(MongoServerUnavailableException) def connectionCheckoutFailedInMessage = getMessage("Connection checkout failed") @@ -316,12 +323,14 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should log on checkout timeout fail'() throws InterruptedException { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).maxWaitTime(50, MILLISECONDS).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() - pool.get(new OperationContext()) + + def timeoutSettings = ClusterFixture.TIMEOUT_SETTINGS.withMaxWaitTimeMS(50) + pool.get(createOperationContext(timeoutSettings)) when: - TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool) + TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool, timeoutSettings) new Thread(connectionGetter).start() connectionGetter.latch.await() @@ -337,11 +346,12 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should log on connection become idle'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(2).minSize(0).maxConnectionIdleTime(1, MILLISECONDS).build(), mockSdamProvider()) + builder().maxSize(2).minSize(0).maxConnectionIdleTime(1, MILLISECONDS).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: pool.ready() - pool.get(new OperationContext()).close() + pool.get(OPERATION_CONTEXT).close() //not cool - but we have no way of waiting for connection to become idle Thread.sleep(500) pool.close(); @@ -362,7 +372,7 @@ class DefaultConnectionPoolSpecification extends Specification { builder().maxSize(1) .minSize(0) .maxConnectionIdleTime(1, MILLISECONDS) - .build(), EmptyProvider.instance()) + .build(), EmptyProvider.instance(), OPERATION_CONTEXT_FACTORY) when: pool.ready() @@ -380,15 +390,15 @@ class DefaultConnectionPoolSpecification extends Specification { def connection = Mock(InternalConnection) connection.getDescription() >> new ConnectionDescription(SERVER_ID) connection.opened() >> false - connection.open() >> { throw new UncheckedIOException('expected failure', new IOException()) } + connection.open(OPERATION_CONTEXT) >> { throw new UncheckedIOException('expected failure', new IOException()) } connectionFactory.create(SERVER_ID, _) >> connection pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), - mockSdamProvider()) + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: try { 
- pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) } catch (UncheckedIOException e) { if ('expected failure' != e.getMessage()) { throw e @@ -408,7 +418,7 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: pool.ready() @@ -423,9 +433,9 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() - def connection = pool.get(new OperationContext()) + def connection = pool.get(OPERATION_CONTEXT) connection.close() when: @@ -439,7 +449,7 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() def connection = selectConnectionAsyncAndGet(pool) connection.close() @@ -455,13 +465,13 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() - def connection = pool.get(new OperationContext()) + def connection = pool.get(OPERATION_CONTEXT) connection.close() when: - connection = pool.get(new OperationContext()) + connection = pool.get(OPERATION_CONTEXT) then: 1 * listener.connectionCheckedOut { it.connectionId.serverId == SERVER_ID } @@ -477,13 +487,13 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() def connection = selectConnectionAsyncAndGet(pool) connection.close() when: - connection = pool.get(new OperationContext()) + connection = pool.get(OPERATION_CONTEXT) then: 1 * listener.connectionCheckedOut { it.connectionId.serverId == SERVER_ID } @@ -501,15 +511,15 @@ class DefaultConnectionPoolSpecification extends Specification { def connection = Mock(InternalConnection) connection.getDescription() >> new ConnectionDescription(SERVER_ID) connection.opened() >> false - connection.open() >> { throw new UncheckedIOException('expected failure', new IOException()) } + connection.open(OPERATION_CONTEXT) >> { throw new UncheckedIOException('expected failure', new IOException()) } connectionFactory.create(SERVER_ID, _) >> connection pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), - mockSdamProvider()) + mockSdamProvider(), 
OPERATION_CONTEXT_FACTORY) pool.ready() when: try { - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) } catch (UncheckedIOException e) { if ('expected failure' != e.getMessage()) { throw e @@ -526,12 +536,12 @@ class DefaultConnectionPoolSpecification extends Specification { def connection = Mock(InternalConnection) connection.getDescription() >> new ConnectionDescription(SERVER_ID) connection.opened() >> false - connection.openAsync(_) >> { SingleResultCallback callback -> - callback.onResult(null, new UncheckedIOException('expected failure', new IOException())) + connection.openAsync(_, _) >> { + it.last().onResult(null, new UncheckedIOException('expected failure', new IOException())) } connectionFactory.create(SERVER_ID, _) >> connection pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), - mockSdamProvider()) + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: @@ -549,12 +559,12 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should fire MongoConnectionPoolClearedException when checking out in paused state'() { given: - pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider()) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) Throwable caught = null when: try { - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) } catch (MongoConnectionPoolClearedException e) { caught = e } @@ -565,11 +575,11 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should fire MongoConnectionPoolClearedException when checking out asynchronously in paused state'() { given: - pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider()) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) CompletableFuture caught = new CompletableFuture<>() when: - pool.getAsync(new OperationContext()) { InternalConnection result, Throwable t -> + pool.getAsync(OPERATION_CONTEXT) { InternalConnection result, Throwable t -> if (t != null) { caught.complete(t) } @@ -582,14 +592,14 @@ class DefaultConnectionPoolSpecification extends Specification { def 'invalidate should record cause'() { given: - pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider()) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) RuntimeException cause = new RuntimeException() Throwable caught = null when: pool.invalidate(cause) try { - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) } catch (MongoConnectionPoolClearedException e) { caught = e } @@ -602,7 +612,7 @@ class DefaultConnectionPoolSpecification extends Specification { given: def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), - mockSdamProvider()) + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: pool.ready() @@ -618,9 +628,9 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should continue to fire events after pool is closed'() { def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + 
.addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() - def connection = pool.get(new OperationContext()) + def connection = pool.get(OPERATION_CONTEXT) pool.close() when: @@ -634,7 +644,7 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should continue to fire events after pool is closed (asynchronous)'() { def listener = Mock(ConnectionPoolListener) pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) - .addConnectionPoolListener(listener).build(), mockSdamProvider()) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() def connection = selectConnectionAsyncAndGet(pool) pool.close() @@ -650,7 +660,7 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should select connection asynchronously if one is immediately available'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() expect: @@ -660,11 +670,11 @@ class DefaultConnectionPoolSpecification extends Specification { def 'should select connection asynchronously if one is not immediately available'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() when: - def connection = pool.get(new OperationContext()) + def connection = pool.get(OPERATION_CONTEXT) def connectionLatch = selectConnectionAsync(pool) connection.close() @@ -675,9 +685,9 @@ class DefaultConnectionPoolSpecification extends Specification { def 'when getting a connection asynchronously should send MongoTimeoutException to callback after timeout period'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).maxWaitTime(5, MILLISECONDS).build(), mockSdamProvider()) + builder().maxSize(1).maxWaitTime(5, MILLISECONDS).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.ready() - pool.get(new OperationContext()) + pool.get(OPERATION_CONTEXT) def firstConnectionLatch = selectConnectionAsync(pool) def secondConnectionLatch = selectConnectionAsync(pool) @@ -697,7 +707,7 @@ class DefaultConnectionPoolSpecification extends Specification { def 'invalidate should do nothing when pool is closed'() { given: pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, - builder().maxSize(1).build(), mockSdamProvider()) + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) pool.close() when: @@ -713,7 +723,7 @@ class DefaultConnectionPoolSpecification extends Specification { def selectConnectionAsync(DefaultConnectionPool pool) { def serverLatch = new ConnectionLatch() - pool.getAsync(new OperationContext()) { InternalConnection result, Throwable e -> + pool.getAsync(OPERATION_CONTEXT) { InternalConnection result, Throwable e -> serverLatch.connection = result serverLatch.throwable = e serverLatch.latch.countDown() @@ -742,5 +752,4 @@ class DefaultConnectionPoolSpecification extends Specification { connection } } - } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy index eb27b23fdfb..5b894c7a735 100644 --- 
a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy @@ -16,42 +16,25 @@ package com.mongodb.internal.connection -import com.mongodb.MongoNamespace + import com.mongodb.ReadPreference -import com.mongodb.ServerAddress import com.mongodb.connection.ClusterConnectionMode -import com.mongodb.connection.ClusterId -import com.mongodb.connection.ConnectionDescription -import com.mongodb.connection.ConnectionId -import com.mongodb.connection.ServerId -import com.mongodb.internal.IgnorableRequestContext import com.mongodb.internal.async.SingleResultCallback -import com.mongodb.internal.binding.StaticBindingContext import com.mongodb.internal.diagnostics.logging.Logger import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.codecs.BsonDocumentCodec -import spock.lang.Shared import spock.lang.Specification -import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.CustomMatchers.compare -import static com.mongodb.connection.ServerType.SHARD_ROUTER -import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER class DefaultServerConnectionSpecification extends Specification { - def namespace = new MongoNamespace('test', 'test') def internalConnection = Mock(InternalConnection) def callback = errorHandlingCallback(Mock(SingleResultCallback), Mock(Logger)) - @Shared - def standaloneConnectionDescription = new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), - 3, STANDALONE, 100, 100, 100, []) - @Shared - def mongosConnectionDescription = new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), - 3, SHARD_ROUTER, 100, 100, 100, []) def 'should execute command protocol asynchronously'() { given: @@ -60,16 +43,14 @@ class DefaultServerConnectionSpecification extends Specification { def codec = new BsonDocumentCodec() def executor = Mock(ProtocolExecutor) def connection = new DefaultServerConnection(internalConnection, executor, ClusterConnectionMode.MULTIPLE) - def operationContext = new OperationContext() - def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, getServerApi(), IgnorableRequestContext.INSTANCE, - operationContext) + when: - connection.commandAsync('test', command, validator, ReadPreference.primary(), codec, context, callback) + connection.commandAsync('test', command, validator, ReadPreference.primary(), codec, OPERATION_CONTEXT, callback) then: 1 * executor.executeAsync({ compare(new CommandProtocolImpl('test', command, validator, ReadPreference.primary(), codec, true, null, null, - ClusterConnectionMode.MULTIPLE, getServerApi(), IgnorableRequestContext.INSTANCE, operationContext), it) - }, internalConnection, NoOpSessionContext.INSTANCE, callback) + ClusterConnectionMode.MULTIPLE, OPERATION_CONTEXT), it) + }, internalConnection, OPERATION_CONTEXT.getSessionContext(), callback) } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy index 
42626a46d9c..c452d757a28 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy @@ -39,6 +39,7 @@ import java.nio.ByteBuffer import java.util.concurrent.CountDownLatch import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER @SuppressWarnings('BusyWait') @@ -79,12 +80,14 @@ class DefaultServerMonitorSpecification extends Specification { def internalConnectionFactory = Mock(InternalConnectionFactory) { create(_) >> { Mock(InternalConnection) { - open() >> { sleep(100) } + open(_) >> { sleep(100) } } } } monitor = new DefaultServerMonitor(new ServerId(new ClusterId(), new ServerAddress()), ServerSettings.builder().build(), - internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, SameObjectProvider.initialized(sdam)) + internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, SameObjectProvider.initialized(sdam), + OPERATION_CONTEXT_FACTORY) + monitor.start() when: @@ -143,7 +146,7 @@ class DefaultServerMonitorSpecification extends Specification { def internalConnectionFactory = Mock(InternalConnectionFactory) { create(_) >> { Mock(InternalConnection) { - open() >> { } + open(_) >> { } getBuffer(_) >> { int size -> new ByteBufNIO(ByteBuffer.allocate(size)) @@ -167,7 +170,7 @@ class DefaultServerMonitorSpecification extends Specification { } monitor = new DefaultServerMonitor(new ServerId(new ClusterId(), new ServerAddress()), ServerSettings.builder().heartbeatFrequency(1, TimeUnit.SECONDS).addServerMonitorListener(serverMonitorListener).build(), - internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider()) + internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: monitor.start() @@ -222,7 +225,7 @@ class DefaultServerMonitorSpecification extends Specification { def internalConnectionFactory = Mock(InternalConnectionFactory) { create(_) >> { Mock(InternalConnection) { - open() >> { } + open(_) >> { } getBuffer(_) >> { int size -> new ByteBufNIO(ByteBuffer.allocate(size)) @@ -246,7 +249,7 @@ class DefaultServerMonitorSpecification extends Specification { } monitor = new DefaultServerMonitor(new ServerId(new ClusterId(), new ServerAddress()), ServerSettings.builder().heartbeatFrequency(1, TimeUnit.SECONDS).addServerMonitorListener(serverMonitorListener).build(), - internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider()) + internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: monitor.start() diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy index a0b96706f0e..f054457b877 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy @@ -36,11 +36,11 @@ import com.mongodb.connection.ServerType import com.mongodb.event.CommandListener import com.mongodb.event.ServerDescriptionChangedEvent import com.mongodb.event.ServerListener -import com.mongodb.internal.IgnorableRequestContext +import 
com.mongodb.internal.TimeoutContext import com.mongodb.internal.async.SingleResultCallback -import com.mongodb.internal.binding.StaticBindingContext import com.mongodb.internal.inject.SameObjectProvider import com.mongodb.internal.session.SessionContext +import com.mongodb.internal.time.Timeout import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonDocument import org.bson.BsonInt32 @@ -50,7 +50,7 @@ import spock.lang.Specification import java.util.concurrent.CountDownLatch -import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.MongoCredential.createCredential import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterConnectionMode.SINGLE @@ -71,7 +71,7 @@ class DefaultServerSpecification extends Specification { Mock(SdamServerDescriptionManager), Mock(ServerListener), Mock(CommandListener), new ClusterClock(), false) when: - def receivedConnection = server.getConnection(new OperationContext()) + def receivedConnection = server.getConnection(OPERATION_CONTEXT) then: receivedConnection @@ -97,7 +97,7 @@ class DefaultServerSpecification extends Specification { when: def callback = new SupplyingCallback() - server.getConnectionAsync(new OperationContext(), callback) + server.getConnectionAsync(OPERATION_CONTEXT, callback) then: callback.get() == connection @@ -114,7 +114,7 @@ class DefaultServerSpecification extends Specification { server.close() when: - server.getConnection(new OperationContext()) + server.getConnection(OPERATION_CONTEXT) then: def ex = thrown(MongoServerUnavailableException) @@ -124,7 +124,7 @@ class DefaultServerSpecification extends Specification { def latch = new CountDownLatch(1) def receivedConnection = null def receivedThrowable = null - server.getConnectionAsync(new OperationContext()) { + server.getConnectionAsync(OPERATION_CONTEXT) { result, throwable -> receivedConnection = result; receivedThrowable = throwable; latch.countDown() } @@ -166,7 +166,7 @@ class DefaultServerSpecification extends Specification { given: def connectionPool = Mock(ConnectionPool) def serverMonitor = Mock(ServerMonitor) - connectionPool.get(new OperationContext()) >> { throw exceptionToThrow } + connectionPool.get(OPERATION_CONTEXT) >> { throw exceptionToThrow } def server = defaultServer(connectionPool, serverMonitor) server.close() @@ -187,7 +187,7 @@ class DefaultServerSpecification extends Specification { def server = defaultServer(connectionPool, serverMonitor) when: - server.getConnection(new OperationContext()) + server.getConnection(OPERATION_CONTEXT) then: def e = thrown(MongoException) @@ -212,7 +212,7 @@ class DefaultServerSpecification extends Specification { def server = defaultServer(connectionPool, serverMonitor) when: - server.getConnection(new OperationContext()) + server.getConnection(OPERATION_CONTEXT) then: def e = thrown(MongoSecurityException) @@ -237,7 +237,7 @@ class DefaultServerSpecification extends Specification { def latch = new CountDownLatch(1) def receivedConnection = null def receivedThrowable = null - server.getConnectionAsync(new OperationContext()) { + server.getConnectionAsync(OPERATION_CONTEXT) { result, throwable -> receivedConnection = result; receivedThrowable = throwable; latch.countDown() } @@ -270,7 +270,7 @@ class DefaultServerSpecification extends Specification { def latch = new CountDownLatch(1) def receivedConnection = null def receivedThrowable = null - 
server.getConnectionAsync(new OperationContext()) { + server.getConnectionAsync(OPERATION_CONTEXT) { result, throwable -> receivedConnection = result; receivedThrowable = throwable; latch.countDown() } @@ -306,19 +306,19 @@ class DefaultServerSpecification extends Specification { ''') def protocol = new TestCommandProtocol(response) testConnection.enqueueProtocol(protocol) - def context = new StaticBindingContext(sessionContext, getServerApi(), IgnorableRequestContext.INSTANCE, new OperationContext()) + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) when: if (async) { CountDownLatch latch = new CountDownLatch(1) testConnection.commandAsync('admin', new BsonDocument('ping', new BsonInt32(1)), NO_OP_FIELD_NAME_VALIDATOR, - ReadPreference.primary(), new BsonDocumentCodec(), context) { + ReadPreference.primary(), new BsonDocumentCodec(), operationContext) { BsonDocument result, Throwable t -> latch.countDown() } latch.await() } else { testConnection.command('admin', new BsonDocument('ping', new BsonInt32(1)), NO_OP_FIELD_NAME_VALIDATOR, - ReadPreference.primary(), new BsonDocumentCodec(), context) + ReadPreference.primary(), new BsonDocumentCodec(), operationContext) } then: @@ -379,7 +379,7 @@ class DefaultServerSpecification extends Specification { } @Override - TestCommandProtocol sessionContext(final SessionContext sessionContext) { + TestCommandProtocol withSessionContext(final SessionContext sessionContext) { contextClusterTime = sessionContext.clusterTime sessionContext.advanceClusterTime(responseDocument.getDocument('$clusterTime')) sessionContext.advanceOperationTime(responseDocument.getTimestamp('operationTime')) @@ -394,7 +394,7 @@ class DefaultServerSpecification extends Specification { } @Override - Cluster.ServersSnapshot getServersSnapshot() { + Cluster.ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) { Cluster.ServersSnapshot result = { serverAddress -> throw new UnsupportedOperationException() } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy index 921e9fc045b..802cf044aac 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy @@ -36,8 +36,8 @@ import java.util.concurrent.TimeUnit import static com.mongodb.internal.connection.DescriptionHelper.createConnectionDescription import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription -import static org.bson.BsonDocument.parse import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER +import static org.bson.BsonDocument.parse class DescriptionHelperSpecification extends Specification { private final ServerAddress serverAddress = new ServerAddress('localhost', 27018) @@ -150,7 +150,7 @@ class DescriptionHelperSpecification extends Specification { def 'server description should reflect not ok legacy hello result'() { expect: createServerDescription(serverAddress, - parse('{ok : 0}'), roundTripTime) == + parse('{ok : 0}'), roundTripTime, 0) == ServerDescription.builder() .ok(false) .address(serverAddress) @@ -162,7 +162,7 @@ class DescriptionHelperSpecification extends Specification { def 'server description should reflect last update time'() { expect: 
createServerDescription(serverAddress, - parse('{ ok : 1 }'), roundTripTime).getLastUpdateTime(TimeUnit.NANOSECONDS) == Time.CONSTANT_TIME + parse('{ ok : 1 }'), roundTripTime, 0).getLastUpdateTime(TimeUnit.NANOSECONDS) == Time.CONSTANT_TIME } def 'server description should reflect roundTripNanos'() { @@ -177,7 +177,7 @@ class DescriptionHelperSpecification extends Specification { maxWireVersion : 3, minWireVersion : 0, ok : 1 - }"""), roundTripTime).roundTripTimeNanos == + }"""), roundTripTime, 0).roundTripTimeNanos == ServerDescription.builder() .ok(true) .address(serverAddress) @@ -201,7 +201,7 @@ class DescriptionHelperSpecification extends Specification { maxWireVersion : 3, minWireVersion : 0, ok : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(serverAddress) @@ -235,7 +235,7 @@ class DescriptionHelperSpecification extends Specification { "maxWireVersion" : 3, "minWireVersion" : 0, "ok" : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(new ServerAddress('localhost', 27018)) @@ -274,7 +274,7 @@ class DescriptionHelperSpecification extends Specification { "minWireVersion" : 0, "lastWrite" : { "lastWriteDate" : ISODate("2016-03-04T23:14:07.338Z") } "ok" : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(new ServerAddress('localhost', 27018)) @@ -326,7 +326,7 @@ class DescriptionHelperSpecification extends Specification { "setVersion" : 2, tags : { "dc" : "east", "use" : "production" } "ok" : 1 - }"""), roundTripTime) + }"""), roundTripTime, 0) then: serverDescription == @@ -374,7 +374,7 @@ class DescriptionHelperSpecification extends Specification { "maxWireVersion" : 3, "minWireVersion" : 0, "ok" : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(serverAddress) @@ -418,7 +418,7 @@ class DescriptionHelperSpecification extends Specification { "maxWireVersion" : 3, "minWireVersion" : 0, "ok" : 1 - }"""), roundTripTime) + }"""), roundTripTime, 0) then: serverDescription == @@ -466,7 +466,7 @@ class DescriptionHelperSpecification extends Specification { "maxWireVersion" : 3, "minWireVersion" : 0, "ok" : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(serverAddressOfHidden) @@ -499,7 +499,7 @@ class DescriptionHelperSpecification extends Specification { "maxWireVersion" : 3, "minWireVersion" : 0, "ok" : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(serverAddress) @@ -525,7 +525,7 @@ class DescriptionHelperSpecification extends Specification { "maxWireVersion" : 3, "minWireVersion" : 0, "ok" : 1 - }"""), roundTripTime) == + }"""), roundTripTime, 0) == ServerDescription.builder() .ok(true) .address(serverAddress) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageSpecification.groovy deleted file mode 100644 index 514499c86b4..00000000000 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageSpecification.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.connection - -import spock.lang.Specification - - -class ExponentiallyWeightedMovingAverageSpecification extends Specification { - - def 'constructor should throw if alpha is not between 0.0 and 1.0'() { - when: - new ExponentiallyWeightedMovingAverage(alpha) - - then: - thrown(IllegalArgumentException) - - where: - alpha << [-0.001, -0.01, -0.1, -1, 1.001, 1.01, 1.1] - } - - def 'constructor should not throw if alpha is between 0.0 and 1.0'() { - when: - new ExponentiallyWeightedMovingAverage(alpha) - - then: - true - - where: - alpha << [-0.0, 0.01, 0.1, 0.001, 0.01, 0.1, 0.2, 1.0] - } - - def 'the average should be exponentially weighted'() { - when: - def average = new ExponentiallyWeightedMovingAverage(alpha) - for (def sample : samples) { - average.addSample(sample) - } - - then: - average.getAverage() == result - - where: - alpha << [0.2, 0.2, 0.2, 0.2, 0.2] - samples << [[], [10], [10, 20], [10, 20, 12], [10, 20, 12, 17]] - result << [0, 10, 12, 12, 13] - } -} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java new file mode 100644 index 00000000000..59da49bfbe5 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.internal.connection;
+
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.util.List;
+import java.util.stream.Stream;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+
+public class ExponentiallyWeightedMovingAverageTest {
+
+    @ParameterizedTest(name = "{index}: {0}")
+    @ValueSource(doubles = {-0.001, -0.01, -0.1, -1, 1.001, 1.01, 1.1})
+    @DisplayName("constructor should throw if alpha is not between 0.0 and 1.0")
+    void testInvalidAlpha(final double alpha) {
+        assertThrows(IllegalArgumentException.class, () -> new ExponentiallyWeightedMovingAverage(alpha));
+    }
+
+    @ParameterizedTest(name = "{index}: {0}")
+    @ValueSource(doubles = {-0.0, 0.01, 0.1, 0.001, 0.01, 0.1, 0.2, 1.0})
+    @DisplayName("constructor should not throw if alpha is between 0.0 and 1.0")
+    void testValidAlpha(final double alpha) {
+        assertDoesNotThrow(() -> new ExponentiallyWeightedMovingAverage(alpha));
+    }
+
+
+    @ParameterizedTest(name = "{index}: samples: {1}. Expected: {2}")
+    @DisplayName("the average should be exponentially weighted")
+    @MethodSource
+    public void testAverageIsExponentiallyWeighted(final double alpha, final List<Integer> samples, final int expectedAverageRTT) {
+        ExponentiallyWeightedMovingAverage average = new ExponentiallyWeightedMovingAverage(alpha);
+        samples.forEach(average::addSample);
+
+        assertEquals(expectedAverageRTT, average.getAverage());
+    }
+
+    private static Stream<Arguments> testAverageIsExponentiallyWeighted() {
+        return Stream.of(
+                Arguments.of(0.2, emptyList(), 0),
+                Arguments.of(0.2, singletonList(10), 10),
+                Arguments.of(0.2, asList(10, 20), 12),
+                Arguments.of(0.2, asList(10, 20, 12), 12),
+                Arguments.of(0.2, asList(10, 20, 12, 17), 13)
+        );
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy
index c389e647be1..93bc656226a 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy
@@ -27,6 +27,7 @@ import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerId
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.TimeoutSettings
 import org.bson.BsonArray
 import org.bson.BsonBoolean
 import org.bson.BsonDocument
@@ -47,11 +48,13 @@ import static com.mongodb.internal.connection.ClientMetadataHelperProseTest.crea
 import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO
 import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply
 import static com.mongodb.internal.connection.MessageHelper.decodeCommand
+import static com.mongodb.internal.connection.OperationContext.simpleOperationContext
 
 class InternalStreamConnectionInitializerSpecification extends Specification {
     def 
serverId = new ServerId(new ClusterId(), new ServerAddress()) def internalConnection = new TestInternalConnection(serverId, ServerType.STANDALONE) + def operationContext = simpleOperationContext(TimeoutSettings.DEFAULT, null) def 'should create correct description'() { given: @@ -59,8 +62,8 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulReplies(false, null) - def description = initializer.startHandshake(internalConnection) - description = initializer.finishHandshake(internalConnection, description) + def description = initializer.startHandshake(internalConnection, operationContext) + description = initializer.finishHandshake(internalConnection, description, operationContext) def connectionDescription = description.connectionDescription def serverDescription = description.serverDescription @@ -76,10 +79,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulReplies(false, null) def futureCallback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, futureCallback) + initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback) def description = futureCallback.get() futureCallback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, futureCallback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback) description = futureCallback.get() def connectionDescription = description.connectionDescription def serverDescription = description.serverDescription @@ -95,8 +98,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulReplies(false, 123) - def internalDescription = initializer.startHandshake(internalConnection) - def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription + def internalDescription = initializer.startHandshake(internalConnection, operationContext) + def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext) + .connectionDescription then: connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123) @@ -108,8 +112,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulRepliesWithConnectionIdIsHelloResponse(false, 123) - def internalDescription = initializer.startHandshake(internalConnection) - def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription + def internalDescription = initializer.startHandshake(internalConnection, operationContext) + def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext) + .connectionDescription then: connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123) @@ -122,10 +127,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulReplies(false, 123) def futureCallback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, futureCallback) + initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback) def description = futureCallback.get() futureCallback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, futureCallback) + 
initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback) def connectionDescription = futureCallback.get().connectionDescription then: @@ -139,10 +144,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulRepliesWithConnectionIdIsHelloResponse(false, 123) def futureCallback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, futureCallback) + initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback) def description = futureCallback.get() futureCallback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, futureCallback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback) description = futureCallback.get() def connectionDescription = description.connectionDescription @@ -158,12 +163,13 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulReplies(false, null) - def internalDescription = initializer.startHandshake(internalConnection) - def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription + def internalDescription = initializer.startHandshake(internalConnection, operationContext) + def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext) + .connectionDescription then: connectionDescription - 1 * firstAuthenticator.authenticate(internalConnection, _) + 1 * firstAuthenticator.authenticate(internalConnection, _, _) } def 'should authenticate asynchronously'() { @@ -175,15 +181,15 @@ class InternalStreamConnectionInitializerSpecification extends Specification { enqueueSuccessfulReplies(false, null) def futureCallback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, futureCallback) + initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback) def description = futureCallback.get() futureCallback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, futureCallback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback) def connectionDescription = futureCallback.get().connectionDescription then: connectionDescription - 1 * authenticator.authenticateAsync(internalConnection, _, _) >> { it[2].onResult(null, null) } + 1 * authenticator.authenticateAsync(internalConnection, _, _, _) >> { it[3].onResult(null, null) } } def 'should not authenticate if server is an arbiter'() { @@ -194,12 +200,13 @@ class InternalStreamConnectionInitializerSpecification extends Specification { when: enqueueSuccessfulReplies(true, null) - def internalDescription = initializer.startHandshake(internalConnection) - def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription + def internalDescription = initializer.startHandshake(internalConnection, operationContext) + def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext) + .connectionDescription then: connectionDescription - 0 * authenticator.authenticate(internalConnection, _) + 0 * authenticator.authenticate(internalConnection, _, _) } def 'should not authenticate asynchronously if server is an arbiter asynchronously'() { @@ -211,10 +218,10 @@ class 
InternalStreamConnectionInitializerSpecification extends Specification { enqueueSuccessfulReplies(true, null) def futureCallback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, futureCallback) + initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback) def description = futureCallback.get() futureCallback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, futureCallback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback) def connectionDescription = futureCallback.get().connectionDescription then: @@ -236,14 +243,14 @@ class InternalStreamConnectionInitializerSpecification extends Specification { enqueueSuccessfulReplies(false, null) if (async) { def callback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, callback) + initializer.startHandshakeAsync(internalConnection, operationContext, callback) def description = callback.get() callback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, callback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback) callback.get() } else { - def internalDescription = initializer.startHandshake(internalConnection) - initializer.finishHandshake(internalConnection, internalDescription) + def internalDescription = initializer.startHandshake(internalConnection, operationContext) + initializer.finishHandshake(internalConnection, internalDescription, operationContext) } then: @@ -273,14 +280,14 @@ class InternalStreamConnectionInitializerSpecification extends Specification { enqueueSuccessfulReplies(false, null) if (async) { def callback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, callback) + initializer.startHandshakeAsync(internalConnection, operationContext, callback) def description = callback.get() callback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, callback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback) callback.get() } else { - def internalDescription = initializer.startHandshake(internalConnection) - initializer.finishHandshake(internalConnection, internalDescription) + def internalDescription = initializer.startHandshake(internalConnection, operationContext) + initializer.finishHandshake(internalConnection, internalDescription, operationContext) } then: @@ -312,9 +319,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification { then: description if (async) { - 1 * scramShaAuthenticator.authenticateAsync(internalConnection, _, _) + 1 * scramShaAuthenticator.authenticateAsync(internalConnection, _, _, _) } else { - 1 * scramShaAuthenticator.authenticate(internalConnection, _) + 1 * scramShaAuthenticator.authenticate(internalConnection, _, _) } 1 * ((SpeculativeAuthenticator) scramShaAuthenticator).createSpeculativeAuthenticateCommand(_) ((SpeculativeAuthenticator) scramShaAuthenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse @@ -343,9 +350,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification { then: description if (async) { - 1 * authenticator.authenticateAsync(internalConnection, _, _) + 1 * authenticator.authenticateAsync(internalConnection, _, _, _) } else { - 1 * authenticator.authenticate(internalConnection, _) + 1 * 
authenticator.authenticate(internalConnection, _, _) } 1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_) ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse @@ -374,9 +381,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification { then: description if (async) { - 1 * authenticator.authenticateAsync(internalConnection, _, _) + 1 * authenticator.authenticateAsync(internalConnection, _, _, _) } else { - 1 * authenticator.authenticate(internalConnection, _) + 1 * authenticator.authenticate(internalConnection, _, _) } 1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_) ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse @@ -402,9 +409,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification { then: description if (async) { - 1 * authenticator.authenticateAsync(internalConnection, _, _) + 1 * authenticator.authenticateAsync(internalConnection, _, _, _) } else { - 1 * authenticator.authenticate(internalConnection, _) + 1 * authenticator.authenticate(internalConnection, _, _) } 1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_) ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse @@ -444,14 +451,14 @@ class InternalStreamConnectionInitializerSpecification extends Specification { final TestInternalConnection connection) { if (async) { def callback = new FutureResultCallback() - initializer.startHandshakeAsync(internalConnection, callback) + initializer.startHandshakeAsync(internalConnection, operationContext, callback) def description = callback.get() callback = new FutureResultCallback() - initializer.finishHandshakeAsync(internalConnection, description, callback) + initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback) callback.get() } else { - def internalDescription = initializer.startHandshake(connection) - initializer.finishHandshake(connection, internalDescription) + def internalDescription = initializer.startHandshake(connection, operationContext) + initializer.finishHandshake(connection, internalDescription, operationContext) } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy index c0cd580e02e..7a0dca34526 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy @@ -20,14 +20,15 @@ import com.mongodb.MongoCommandException import com.mongodb.MongoInternalException import com.mongodb.MongoInterruptedException import com.mongodb.MongoNamespace +import com.mongodb.MongoOperationTimeoutException import com.mongodb.MongoSocketClosedException import com.mongodb.MongoSocketException import com.mongodb.MongoSocketReadException +import com.mongodb.MongoSocketReadTimeoutException import com.mongodb.MongoSocketWriteException import com.mongodb.ReadConcern import com.mongodb.ServerAddress import com.mongodb.async.FutureResultCallback -import com.mongodb.connection.AsyncCompletionHandler import com.mongodb.connection.ClusterId import com.mongodb.connection.ConnectionDescription import 
com.mongodb.connection.ConnectionId @@ -39,14 +40,13 @@ import com.mongodb.event.CommandFailedEvent import com.mongodb.event.CommandStartedEvent import com.mongodb.event.CommandSucceededEvent import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils -import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.session.SessionContext import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.BsonReader import org.bson.BsonString -import org.bson.ByteBuf import org.bson.ByteBufNIO import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DecoderContext @@ -59,6 +59,8 @@ import java.util.concurrent.CountDownLatch import java.util.concurrent.ExecutorService import java.util.concurrent.Executors +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT import static com.mongodb.ReadPreference.primary import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterConnectionMode.SINGLE @@ -94,16 +96,16 @@ class InternalStreamConnectionSpecification extends Specification { def internalConnectionInitializationDescription = new InternalConnectionInitializationDescription(connectionDescription, serverDescription) def stream = Mock(Stream) { - openAsync(_) >> { it[0].completed(null) } + openAsync(_, _) >> { it.last().completed(null) } } def streamFactory = Mock(StreamFactory) { create(_) >> { stream } } def initializer = Mock(InternalConnectionInitializer) { - startHandshake(_) >> { internalConnectionInitializationDescription } - finishHandshake(_, _) >> { internalConnectionInitializationDescription } - startHandshakeAsync(_, _) >> { it[1].onResult(internalConnectionInitializationDescription, null) } - finishHandshakeAsync(_, _, _) >> { it[2].onResult(internalConnectionInitializationDescription, null) } + startHandshake(_, _) >> { internalConnectionInitializationDescription } + finishHandshake(_, _, _) >> { internalConnectionInitializationDescription } + startHandshakeAsync(_, _, _) >> { it[2].onResult(internalConnectionInitializationDescription, null) } + finishHandshakeAsync(_, _, _, _) >> { it[3].onResult(internalConnectionInitializationDescription, null) } } def getConnection() { @@ -113,7 +115,7 @@ class InternalStreamConnectionSpecification extends Specification { def getOpenedConnection() { def connection = getConnection() - connection.open() + connection.open(OPERATION_CONTEXT) connection } @@ -131,7 +133,7 @@ class InternalStreamConnectionSpecification extends Specification { .lastUpdateTimeNanos(connection.getInitialServerDescription().getLastUpdateTime(NANOSECONDS)) .build() when: - connection.open() + connection.open(OPERATION_CONTEXT) then: connection.opened() @@ -158,7 +160,7 @@ class InternalStreamConnectionSpecification extends Specification { .build() when: - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: @@ -170,13 +172,13 @@ class InternalStreamConnectionSpecification extends Specification { def 'should close the stream when initialization throws an exception'() { given: def failedInitializer = Mock(InternalConnectionInitializer) { - startHandshake(_) >> { throw new MongoInternalException('Something went wrong') } + startHandshake(_, _) >> { throw new MongoInternalException('Something went wrong') } } def 
connection = new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], null, failedInitializer) when: - connection.open() + connection.open(OPERATION_CONTEXT) then: thrown MongoInternalException @@ -187,14 +189,14 @@ class InternalStreamConnectionSpecification extends Specification { def 'should close the stream when initialization throws an exception asynchronously'() { given: def failedInitializer = Mock(InternalConnectionInitializer) { - startHandshakeAsync(_, _) >> { it[1].onResult(null, new MongoInternalException('Something went wrong')) } + startHandshakeAsync(_, _, _) >> { it[2].onResult(null, new MongoInternalException('Something went wrong')) } } def connection = new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], null, failedInitializer) when: def futureResultCallback = new FutureResultCallback() - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: @@ -204,21 +206,21 @@ class InternalStreamConnectionSpecification extends Specification { def 'should close the stream when writing a message throws an exception'() { given: - stream.write(_) >> { throw new IOException('Something went wrong') } + stream.write(_, _) >> { throw new IOException('Something went wrong') } def connection = getOpenedConnection() def (buffers1, messageId1) = helper.hello() def (buffers2, messageId2) = helper.hello() when: - connection.sendMessage(buffers1, messageId1) + connection.sendMessage(buffers1, messageId1, OPERATION_CONTEXT) then: connection.isClosed() thrown MongoSocketWriteException when: - connection.sendMessage(buffers2, messageId2) + connection.sendMessage(buffers2, messageId2, OPERATION_CONTEXT) then: thrown MongoSocketClosedException @@ -231,7 +233,7 @@ class InternalStreamConnectionSpecification extends Specification { def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync() int seen = 0 - stream.writeAsync(_, _) >> { List buffers, AsyncCompletionHandler callback -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> if (seen == 0) { seen += 1 return callback.failed(new IOException('Something went wrong')) @@ -242,7 +244,7 @@ class InternalStreamConnectionSpecification extends Specification { def connection = getOpenedConnection() when: - connection.sendMessageAsync(buffers1, messageId1, sndCallbck1) + connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1) sndCallbck1.get(10, SECONDS) then: @@ -250,7 +252,7 @@ class InternalStreamConnectionSpecification extends Specification { connection.isClosed() when: - connection.sendMessageAsync(buffers2, messageId2, sndCallbck2) + connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2) sndCallbck2.get(10, SECONDS) then: @@ -259,23 +261,23 @@ class InternalStreamConnectionSpecification extends Specification { def 'should close the stream when reading the message header throws an exception'() { given: - stream.read(16, 0) >> { throw new IOException('Something went wrong') } + stream.read(16, _) >> { throw new IOException('Something went wrong') } def connection = getOpenedConnection() def (buffers1, messageId1) = helper.hello() def (buffers2, messageId2) = helper.hello() when: - connection.sendMessage(buffers1, messageId1) - connection.sendMessage(buffers2, messageId2) - connection.receiveMessage(messageId1) + connection.sendMessage(buffers1, messageId1, OPERATION_CONTEXT) + 
connection.sendMessage(buffers2, messageId2, OPERATION_CONTEXT) + connection.receiveMessage(messageId1, OPERATION_CONTEXT) then: connection.isClosed() thrown MongoSocketReadException when: - connection.receiveMessage(messageId2) + connection.receiveMessage(messageId2, OPERATION_CONTEXT) then: thrown MongoSocketClosedException @@ -283,12 +285,12 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoInternalException when reply header message length > max message length'() { given: - stream.read(36, 0) >> { helper.headerWithMessageSizeGreaterThanMax(1) } + stream.read(36, _) >> { helper.headerWithMessageSizeGreaterThanMax(1) } def connection = getOpenedConnection() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: thrown(MongoInternalException) @@ -297,7 +299,7 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoInternalException when reply header message length > max message length asynchronously'() { given: - stream.readAsync(16, _) >> { int numBytes, AsyncCompletionHandler handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.headerWithMessageSizeGreaterThanMax(1, connectionDescription.maxMessageSize)) } @@ -305,7 +307,7 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() when: - connection.receiveMessageAsync(1, callback) + connection.receiveMessageAsync(1, OPERATION_CONTEXT, callback) callback.get() then: @@ -315,12 +317,12 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.write throws InterruptedIOException'() { given: - stream.write(_) >> { throw new InterruptedIOException() } + stream.write(_, _) >> { throw new InterruptedIOException() } def connection = getOpenedConnection() Thread.currentThread().interrupt() when: - connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1) + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) then: Thread.interrupted() @@ -330,11 +332,11 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoInterruptedException and leave the interrupt status unset when Stream.write throws InterruptedIOException'() { given: - stream.write(_) >> { throw new InterruptedIOException() } + stream.write(_, _) >> { throw new InterruptedIOException() } def connection = getOpenedConnection() when: - connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1) + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) then: !Thread.interrupted() @@ -344,12 +346,12 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.write throws ClosedByInterruptException'() { given: - stream.write(_) >> { throw new ClosedByInterruptException() } + stream.write(_, _) >> { throw new ClosedByInterruptException() } def connection = getOpenedConnection() Thread.currentThread().interrupt() when: - connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1) + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) then: Thread.interrupted() @@ -359,12 +361,12 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoInterruptedException when 
Stream.write throws SocketException and the thread is interrupted'() { given: - stream.write(_) >> { throw new SocketException() } + stream.write(_, _) >> { throw new SocketException() } def connection = getOpenedConnection() Thread.currentThread().interrupt() when: - connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1) + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) then: Thread.interrupted() @@ -374,11 +376,11 @@ class InternalStreamConnectionSpecification extends Specification { def 'should throw MongoSocketWriteException when Stream.write throws SocketException and the thread is not interrupted'() { given: - stream.write(_) >> { throw new SocketException() } + stream.write(_, _) >> { throw new SocketException() } def connection = getOpenedConnection() when: - connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1) + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) then: thrown(MongoSocketWriteException) @@ -392,7 +394,7 @@ class InternalStreamConnectionSpecification extends Specification { Thread.currentThread().interrupt() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: Thread.interrupted() @@ -406,7 +408,7 @@ class InternalStreamConnectionSpecification extends Specification { def connection = getOpenedConnection() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: !Thread.interrupted() @@ -421,7 +423,7 @@ class InternalStreamConnectionSpecification extends Specification { Thread.currentThread().interrupt() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: Thread.interrupted() @@ -436,7 +438,7 @@ class InternalStreamConnectionSpecification extends Specification { Thread.currentThread().interrupt() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: Thread.interrupted() @@ -450,13 +452,95 @@ class InternalStreamConnectionSpecification extends Specification { def connection = getOpenedConnection() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: thrown(MongoSocketReadException) connection.isClosed() } + def 'Should throw timeout exception with underlying socket exception as a cause when Stream.read throws SocketException'() { + given: + stream.read(_, _) >> { throw new SocketTimeoutException() } + def connection = getOpenedConnection() + + when: + connection.receiveMessage(1, OPERATION_CONTEXT.withTimeoutContext( + new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))) + + then: + def timeoutException = thrown(MongoOperationTimeoutException) + def mongoSocketReadTimeoutException = timeoutException.getCause() + mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException + mongoSocketReadTimeoutException.getCause() instanceof SocketTimeoutException + + connection.isClosed() + } + + def 'Should wrap MongoSocketReadTimeoutException with MongoOperationTimeoutException'() { + given: + stream.read(_, _) >> { throw new MongoSocketReadTimeoutException("test", new ServerAddress(), null) } + def connection = getOpenedConnection() + + when: + connection.receiveMessage(1, OPERATION_CONTEXT.withTimeoutContext( + new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))) + + then: + def timeoutException = thrown(MongoOperationTimeoutException) + def mongoSocketReadTimeoutException = timeoutException.getCause() + mongoSocketReadTimeoutException instanceof 
MongoSocketReadTimeoutException + mongoSocketReadTimeoutException.getCause() == null + + connection.isClosed() + } + + + def 'Should wrap SocketException with timeout exception when Stream.read throws SocketException async'() { + given: + stream.readAsync(_ , _, _) >> { numBytes, operationContext, handler -> + handler.failed(new SocketTimeoutException()) + } + def connection = getOpenedConnection() + def callback = new FutureResultCallback() + def operationContext = OPERATION_CONTEXT.withTimeoutContext( + new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) + when: + connection.receiveMessageAsync(1, operationContext, callback) + callback.get() + + then: + def timeoutException = thrown(MongoOperationTimeoutException) + def mongoSocketReadTimeoutException = timeoutException.getCause() + mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException + mongoSocketReadTimeoutException.getCause() instanceof SocketTimeoutException + + connection.isClosed() + } + + def 'Should wrap MongoSocketReadTimeoutException with MongoOperationTimeoutException async'() { + given: + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> + handler.failed(new MongoSocketReadTimeoutException("test", new ServerAddress(), null)) + } + + def connection = getOpenedConnection() + def callback = new FutureResultCallback() + def operationContext = OPERATION_CONTEXT.withTimeoutContext( + new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) + when: + connection.receiveMessageAsync(1, operationContext, callback) + callback.get() + + then: + def timeoutException = thrown(MongoOperationTimeoutException) + def mongoSocketReadTimeoutException = timeoutException.getCause() + mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException + mongoSocketReadTimeoutException.getCause() == null + + connection.isClosed() + } + def 'should close the stream when reading the message header throws an exception asynchronously'() { given: int seen = 0 @@ -464,26 +548,26 @@ class InternalStreamConnectionSpecification extends Specification { def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync() def headers = helper.generateHeaders([messageId1, messageId2]) - stream.writeAsync(_, _) >> { List buffers, AsyncCompletionHandler callback -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> callback.completed(null) } - stream.readAsync(16, _) >> { int numBytes, AsyncCompletionHandler handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> if (seen == 0) { seen += 1 return handler.failed(new IOException('Something went wrong')) } handler.completed(headers.pop()) } - stream.readAsync(94, _) >> { int numBytes, AsyncCompletionHandler handler -> + stream.readAsync(94, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultBody()) } def connection = getOpenedConnection() when: - connection.sendMessageAsync(buffers1, messageId1, sndCallbck1) - connection.sendMessageAsync(buffers2, messageId2, sndCallbck2) - connection.receiveMessageAsync(messageId1, rcvdCallbck1) - connection.receiveMessageAsync(messageId2, rcvdCallbck2) + connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1) + connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2) + connection.receiveMessageAsync(messageId1, OPERATION_CONTEXT, rcvdCallbck1) + connection.receiveMessageAsync(messageId2, OPERATION_CONTEXT, rcvdCallbck2) rcvdCallbck1.get(1, SECONDS) then: @@ -499,20 +583,20 @@ class 
InternalStreamConnectionSpecification extends Specification { def 'should close the stream when reading the message body throws an exception'() { given: - stream.read(16, 0) >> helper.defaultMessageHeader(1) - stream.read(90, 0) >> { throw new IOException('Something went wrong') } + stream.read(16, _) >> helper.defaultMessageHeader(1) + stream.read(90, _) >> { throw new IOException('Something went wrong') } def connection = getOpenedConnection() when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: connection.isClosed() thrown MongoSocketReadException when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: thrown MongoSocketClosedException @@ -525,21 +609,21 @@ class InternalStreamConnectionSpecification extends Specification { def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync() def headers = helper.generateHeaders([messageId1, messageId2]) - stream.writeAsync(_, _) >> { List buffers, AsyncCompletionHandler callback -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> callback.completed(null) } - stream.readAsync(16, _) >> { int numBytes, AsyncCompletionHandler handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(headers.remove(0)) } - stream.readAsync(_, _) >> { int numBytes, AsyncCompletionHandler handler -> + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> handler.failed(new IOException('Something went wrong')) } def connection = getOpenedConnection() when: - connection.sendMessageAsync(buffers1, messageId1, sndCallbck1) - connection.sendMessageAsync(buffers2, messageId2, sndCallbck2) - connection.receiveMessageAsync(messageId1, rcvdCallbck1) + connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1) + connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2) + connection.receiveMessageAsync(messageId1, OPERATION_CONTEXT, rcvdCallbck1) rcvdCallbck1.get(1, SECONDS) then: @@ -547,7 +631,7 @@ class InternalStreamConnectionSpecification extends Specification { connection.isClosed() when: - connection.receiveMessageAsync(messageId2, rcvdCallbck2) + connection.receiveMessageAsync(messageId2, OPERATION_CONTEXT, rcvdCallbck2) rcvdCallbck2.get(1, SECONDS) then: @@ -562,12 +646,11 @@ class InternalStreamConnectionSpecification extends Specification { null) def response = '{ok : 0, errmsg : "failed"}' stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.messageHeader(commandMessage.getId(), response) - stream.read(_, 0) >> helper.reply(response) + stream.read(16, _) >> helper.messageHeader(commandMessage.getId(), response) + stream.read(_, _) >> helper.reply(response) when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: thrown(MongoCommandException) @@ -584,19 +667,18 @@ class InternalStreamConnectionSpecification extends Specification { def response = '{ok : 0, errmsg : "failed"}' stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, 
operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(_, _) >> { numBytes, handler -> + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.reply(response)) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: @@ -612,7 +694,7 @@ class InternalStreamConnectionSpecification extends Specification { def messages = (1..numberOfOperations).collect { helper.helloAsync() } def streamLatch = new CountDownLatch(1) - stream.writeAsync(_, _) >> { List buffers, AsyncCompletionHandler callback -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> streamPool.submit { streamLatch.await() callback.failed(new IOException()) @@ -624,7 +706,7 @@ class InternalStreamConnectionSpecification extends Specification { def callbacks = [] (1..numberOfOperations).each { n -> def (buffers, messageId, sndCallbck, rcvdCallbck) = messages.pop() - connection.sendMessageAsync(buffers, messageId, sndCallbck) + connection.sendMessageAsync(buffers, messageId, OPERATION_CONTEXT, sndCallbck) callbacks.add(sndCallbck) } streamLatch.countDown() @@ -645,12 +727,11 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId()) - stream.read(90, 0) >> helper.defaultReply() + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> helper.defaultReply() when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: commandListener.eventsWereDelivered([ @@ -667,13 +748,13 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId()) - stream.read(90, 0) >> helper.defaultReply() + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> helper.defaultReply() when: connection.sendAndReceive(commandMessage, { BsonReader reader, DecoderContext decoderContext -> throw new CodecConfigurationException('') - }, NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext()) + }, OPERATION_CONTEXT) then: thrown(CodecConfigurationException) @@ -696,17 +777,17 @@ class InternalStreamConnectionSpecification extends Specification { $clusterTime : { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } } }''' stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId()) - stream.read(_, 0) >> helper.reply(response) + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + 
stream.read(_, _) >> helper.reply(response) def sessionContext = Mock(SessionContext) { 1 * advanceOperationTime(BsonDocument.parse(response).getTimestamp('operationTime')) 1 * advanceClusterTime(BsonDocument.parse(response).getDocument('$clusterTime')) getReadConcern() >> ReadConcern.DEFAULT } + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), sessionContext, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), operationContext) then: true @@ -725,13 +806,13 @@ class InternalStreamConnectionSpecification extends Specification { $clusterTime : { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } } }''' stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(_, _) >> { numBytes, handler -> + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.reply(response)) } def sessionContext = Mock(SessionContext) { @@ -739,10 +820,10 @@ class InternalStreamConnectionSpecification extends Specification { 1 * advanceClusterTime(BsonDocument.parse(response).getDocument('$clusterTime')) getReadConcern() >> ReadConcern.DEFAULT } + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), sessionContext, IgnorableRequestContext.INSTANCE, - new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), operationContext, callback) callback.get() then: @@ -756,11 +837,10 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.write(_) >> { throw new MongoSocketWriteException('Failed to write', serverAddress, new IOException()) } + stream.write(_, _) >> { throw new MongoSocketWriteException('Failed to write', serverAddress, new IOException()) } when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: def e = thrown(MongoSocketWriteException) @@ -777,11 +857,10 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> { throw new MongoSocketReadException('Failed to read', serverAddress) } + stream.read(16, _) >> { throw new MongoSocketReadException('Failed to read', serverAddress) } when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, 
new BsonDocumentCodec(), OPERATION_CONTEXT) then: def e = thrown(MongoSocketReadException) @@ -798,12 +877,11 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId()) - stream.read(90, 0) >> { throw new MongoSocketReadException('Failed to read', serverAddress) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> { throw new MongoSocketReadException('Failed to read', serverAddress) } when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: def e = thrown(MongoSocketException) @@ -821,12 +899,11 @@ class InternalStreamConnectionSpecification extends Specification { null) def response = '{ok : 0, errmsg : "failed"}' stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.messageHeader(commandMessage.getId(), response) - stream.read(_, 0) >> helper.reply(response) + stream.read(16, _) >> helper.messageHeader(commandMessage.getId(), response) + stream.read(_, _) >> helper.reply(response) when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: def e = thrown(MongoCommandException) @@ -843,12 +920,11 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId()) - stream.read(90, 0) >> helper.defaultReply() + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> helper.defaultReply() when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: commandListener.eventsWereDelivered([ @@ -880,12 +956,11 @@ class InternalStreamConnectionSpecification extends Specification { def commandMessage = new CommandMessage(cmdNamespace, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings, MULTIPLE, null) stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId()) - stream.read(_, 0) >> helper.reply('{ok : 0, errmsg : "failed"}') + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(_, _) >> helper.reply('{ok : 0, errmsg : "failed"}') when: - connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, - new OperationContext()) + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) then: thrown(MongoCommandException) @@ -920,19 
+995,18 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(90, _) >> { numBytes, handler -> + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultReply()) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: @@ -952,20 +1026,20 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(90, _) >> { numBytes, handler -> + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultReply()) } when: connection.sendAndReceiveAsync(commandMessage, { BsonReader reader, DecoderContext decoderContext -> throw new CodecConfigurationException('') - }, NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + }, OPERATION_CONTEXT, callback) callback.get() then: @@ -987,13 +1061,12 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.failed(new MongoSocketWriteException('failed', serverAddress, new IOException())) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: @@ -1013,16 +1086,15 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.failed(new MongoSocketReadException('Failed to read', serverAddress)) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + 
connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: @@ -1042,19 +1114,18 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(90, _) >> { numBytes, handler -> + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> handler.failed(new MongoSocketReadException('Failed to read', serverAddress)) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: @@ -1075,19 +1146,18 @@ class InternalStreamConnectionSpecification extends Specification { def response = '{ok : 0, errmsg : "failed"}' stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(_, _) >> { numBytes, handler -> + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.reply(response)) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: @@ -1107,19 +1177,18 @@ class InternalStreamConnectionSpecification extends Specification { def callback = new FutureResultCallback() stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } - stream.writeAsync(_, _) >> { buffers, handler -> + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> handler.completed(null) } - stream.readAsync(16, _) >> { numBytes, handler -> + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultMessageHeader(commandMessage.getId())) } - stream.readAsync(90, _) >> { numBytes, handler -> + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> handler.completed(helper.defaultReply()) } when: - connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, - IgnorableRequestContext.INSTANCE, new OperationContext(), callback) + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) callback.get() then: diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy index 4ea47fb3694..374687f7d01 100644 --- 
a/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy @@ -29,6 +29,9 @@ import spock.lang.Unroll import javax.management.ObjectName import java.lang.management.ManagementFactory +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY + class JMXConnectionPoolListenerSpecification extends Specification { private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress('host1', 27018)) @@ -43,12 +46,12 @@ class JMXConnectionPoolListenerSpecification extends Specification { given: provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, ConnectionPoolSettings.builder().minSize(0).maxSize(5) - .addConnectionPoolListener(jmxListener).build(), mockSdamProvider()) + .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) provider.ready() when: - provider.get(new OperationContext()) - provider.get(new OperationContext()).close() + provider.get(OPERATION_CONTEXT) + provider.get(OPERATION_CONTEXT).close() then: with(jmxListener.getMBean(SERVER_ID)) { @@ -68,7 +71,7 @@ class JMXConnectionPoolListenerSpecification extends Specification { when: provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, ConnectionPoolSettings.builder().minSize(0).maxSize(5) - .addConnectionPoolListener(jmxListener).build(), mockSdamProvider()) + .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) then: ManagementFactory.getPlatformMBeanServer().isRegistered( @@ -82,7 +85,7 @@ class JMXConnectionPoolListenerSpecification extends Specification { given: provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, ConnectionPoolSettings.builder().minSize(0).maxSize(5) - .addConnectionPoolListener(jmxListener).build(), mockSdamProvider()) + .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) when: provider.close() diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java index 2d3e6dbb49d..ad447f3da65 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java @@ -19,6 +19,7 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoConfigurationException; import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.async.FutureResultCallback; @@ -29,8 +30,8 @@ import com.mongodb.connection.ServerDescription; import com.mongodb.connection.ServerSettings; import com.mongodb.connection.ServerType; -import com.mongodb.selector.ServerSelector; import com.mongodb.lang.NonNull; +import com.mongodb.selector.ServerSelector; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Tag; @@ -50,6 +51,9 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; import 
static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -90,14 +94,14 @@ public void shouldSelectServerWhenThereIsNoSRVLookup() { cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, mock(DnsSrvRecordMonitorFactory.class)); // when - ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), new OperationContext()); + ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), OPERATION_CONTEXT); // then assertServerTupleExpectations(serverAddress, expectedServer, serverTuple); // when FutureResultCallback callback = new FutureResultCallback<>(); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback); serverTuple = callback.get(); // then @@ -125,7 +129,7 @@ public void shouldSelectServerWhenThereIsSRVLookup() { cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); // when - ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), new OperationContext()); + ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), OPERATION_CONTEXT); // then assertServerTupleExpectations(resolvedServerAddress, expectedServer, serverTuple); @@ -153,7 +157,7 @@ public void shouldSelectServerAsynchronouslyWhenThereIsSRVLookup() { // when FutureResultCallback callback = new FutureResultCallback<>(); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback); ServerTuple serverTuple = callback.get(); // then @@ -179,7 +183,7 @@ public void shouldFailSelectServerWhenThereIsSRVMisconfiguration() { cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); MongoClientException exception = assertThrows(MongoClientException.class, () -> cluster.selectServer(mock(ServerSelector.class), - new OperationContext())); + OPERATION_CONTEXT)); assertEquals("In load balancing mode, the host must resolve to a single SRV record, but instead it resolved to multiple hosts", exception.getMessage()); } @@ -203,7 +207,7 @@ public void shouldFailSelectServerAsynchronouslyWhenThereIsSRVMisconfiguration() cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); FutureResultCallback callback = new FutureResultCallback<>(); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback); MongoClientException exception = assertThrows(MongoClientException.class, callback::get); assertEquals("In load balancing mode, the host must resolve to a single SRV record, but instead it resolved to multiple hosts", @@ -218,7 +222,6 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookup() { ClusterableServer expectedServer = mock(ClusterableServer.class); ClusterSettings clusterSettings = ClusterSettings.builder() - .serverSelectionTimeout(5, MILLISECONDS) .mode(ClusterConnectionMode.LOAD_BALANCED) .srvHost(srvHostName) .build(); @@ -232,8 +235,34 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookup() { cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); 
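        // A minimal sketch of the pattern exercised in these tests, assuming the ClusterFixture
        // helpers imported above (OPERATION_CONTEXT, TIMEOUT_SETTINGS, createOperationContext):
        // the server selection timeout previously configured via
        // ClusterSettings.builder().serverSelectionTimeout(...) is now carried by the
        // per-operation TimeoutSettings, e.g.
        //
        //     cluster.selectServer(mock(ServerSelector.class),
        //             createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)));
        //
        // which is why the assertions switch from matching the exact "Timed out after N ms"
        // message to checking a stable substring of it.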
MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class), - new OperationContext())); - assertEquals("Timed out after 5 ms while waiting to resolve SRV records for foo.bar.com.", exception.getMessage()); + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)))); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + } + + @Test + public void shouldTimeoutSelectServerWhenThereIsSRVLookupAndTimeoutMsIsSet() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)).sleepTime(Duration.ofHours(1))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); + + //when & then + MongoOperationTimeoutException exception = assertThrows(MongoOperationTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5).withTimeout(10L, MILLISECONDS)))); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); } @Test @@ -244,7 +273,6 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookupException() { ClusterableServer expectedServer = mock(ClusterableServer.class); ClusterSettings clusterSettings = ClusterSettings.builder() - .serverSelectionTimeout(10, MILLISECONDS) .mode(ClusterConnectionMode.LOAD_BALANCED) .srvHost(srvHostName) .build(); @@ -259,10 +287,10 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookupException() { cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class), - new OperationContext())); - assertEquals("Timed out after 10 ms while waiting to resolve SRV records for foo.bar.com. 
" - + "Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'", - exception.getMessage()); + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(10)))); + + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + assertTrue(exception.getMessage().contains("Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'")); } @Test @@ -274,7 +302,6 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookup() { ClusterSettings clusterSettings = ClusterSettings .builder() - .serverSelectionTimeout(5, MILLISECONDS) .mode(ClusterConnectionMode.LOAD_BALANCED) .srvHost(srvHostName) .build(); @@ -288,10 +315,11 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookup() { cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); FutureResultCallback callback = new FutureResultCallback<>(); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback); MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, callback::get); - assertEquals("Timed out after 5 ms while waiting to resolve SRV records for foo.bar.com.", exception.getMessage()); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); } @Test @@ -302,7 +330,6 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookupException ClusterableServer expectedServer = mock(ClusterableServer.class); ClusterSettings clusterSettings = ClusterSettings.builder() - .serverSelectionTimeout(10, MILLISECONDS) .mode(ClusterConnectionMode.LOAD_BALANCED) .srvHost(srvHostName) .build(); @@ -317,12 +344,12 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookupException cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory); FutureResultCallback callback = new FutureResultCallback<>(); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(10)), callback); MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, callback::get); - assertEquals("Timed out after 10 ms while waiting to resolve SRV records for foo.bar.com. 
" - + "Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'", - exception.getMessage()); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + assertTrue(exception.getMessage().contains("Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'")); } @Test @@ -368,7 +395,6 @@ public void synchronousConcurrentTest() throws InterruptedException, ExecutionEx ClusterableServer expectedServer = mock(ClusterableServer.class); ClusterSettings clusterSettings = ClusterSettings.builder() - .serverSelectionTimeout(5, MILLISECONDS) .mode(ClusterConnectionMode.LOAD_BALANCED) .srvHost(srvHostName) .build(); @@ -389,7 +415,8 @@ public void synchronousConcurrentTest() throws InterruptedException, ExecutionEx boolean success = false; while (!success) { try { - cluster.selectServer(mock(ServerSelector.class), new OperationContext()); + cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5))); success = true; } catch (MongoTimeoutException e) { // this is expected @@ -397,7 +424,8 @@ public void synchronousConcurrentTest() throws InterruptedException, ExecutionEx } // Keep going for a little while for (int j = 0; j < 100; j++) { - cluster.selectServer(mock(ServerSelector.class), new OperationContext()); + cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5))); } })); } @@ -417,7 +445,6 @@ public void asynchronousConcurrentTest() throws InterruptedException, ExecutionE ClusterableServer expectedServer = mock(ClusterableServer.class); ClusterSettings clusterSettings = ClusterSettings.builder() - .serverSelectionTimeout(5, MILLISECONDS) .mode(ClusterConnectionMode.LOAD_BALANCED) .srvHost(srvHostName) .build(); @@ -447,13 +474,15 @@ public void asynchronousConcurrentTest() throws InterruptedException, ExecutionE while (!dnsSrvRecordMonitorReference.get().isInitialized()) { FutureResultCallback callback = new FutureResultCallback<>(); callbacks.add(callback); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback); } // Keep going for a little while for (int j = 0; j < 100; j++) { FutureResultCallback callback = new FutureResultCallback<>(); callbacks.add(callback); - cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback); } })); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy index 9c3fb0d91db..b317f3dd0ba 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy @@ -30,6 +30,7 @@ import com.mongodb.event.CommandListener import com.mongodb.event.CommandStartedEvent import com.mongodb.event.CommandSucceededEvent import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.diagnostics.logging.Logger 
import com.mongodb.internal.logging.StructuredLogger import com.mongodb.internal.validator.NoOpFieldNameValidator @@ -39,6 +40,7 @@ import org.bson.BsonInt32 import org.bson.BsonString import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterConnectionMode.SINGLE import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION @@ -57,14 +59,14 @@ class LoggingCommandEventSenderSpecification extends Specification { def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, MULTIPLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - message.encode(bsonOutput, NoOpSessionContext.INSTANCE) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) def logger = Stub(Logger) { isDebugEnabled() >> debugLoggingEnabled } - def context = new OperationContext() + def operationContext = OPERATION_CONTEXT def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, commandListener, - IgnorableRequestContext.INSTANCE, context, message, bsonOutput, new StructuredLogger(logger), - LoggerSettings.builder().build()) + operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) when: sender.sendStartedEvent() @@ -73,17 +75,17 @@ class LoggingCommandEventSenderSpecification extends Specification { sender.sendFailedEvent(failureException) then: - commandListener.eventsWereDelivered( - [ - new CommandStartedEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName, - commandDocument.getFirstKey(), commandDocument.append('$db', new BsonString(namespace.databaseName))), - new CommandSucceededEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName, - commandDocument.getFirstKey(), new BsonDocument(), 1), - new CommandSucceededEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName, - commandDocument.getFirstKey(), replyDocument, 1), - new CommandFailedEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName, - commandDocument.getFirstKey(), 1, failureException) - ]) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, operationContext.id, message.getId(), connectionDescription, + namespace.databaseName, commandDocument.getFirstKey(), + commandDocument.append('$db', new BsonString(namespace.databaseName))), + new CommandSucceededEvent(null, operationContext.id, message.getId(), connectionDescription, + namespace.databaseName, commandDocument.getFirstKey(), new BsonDocument(), 1), + new CommandSucceededEvent(null, operationContext.id, message.getId(), connectionDescription, + namespace.databaseName, commandDocument.getFirstKey(), replyDocument, 1), + new CommandFailedEvent(null, operationContext.id, message.getId(), connectionDescription, + namespace.databaseName, commandDocument.getFirstKey(), 1, failureException) + ]) where: debugLoggingEnabled << [true, false] @@ -102,13 +104,14 @@ class LoggingCommandEventSenderSpecification extends Specification { def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, MULTIPLE, null) def bsonOutput = new ByteBufferBsonOutput(new 
SimpleBufferProvider()) - message.encode(bsonOutput, NoOpSessionContext.INSTANCE) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) def logger = Mock(Logger) { isDebugEnabled() >> true } - def operationContext = new OperationContext() + def operationContext = OPERATION_CONTEXT def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, commandListener, - IgnorableRequestContext.INSTANCE, operationContext, message, bsonOutput, new StructuredLogger(logger), + operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) when: sender.sendStartedEvent() @@ -158,14 +161,15 @@ class LoggingCommandEventSenderSpecification extends Specification { def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, SINGLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - message.encode(bsonOutput, NoOpSessionContext.INSTANCE) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) def logger = Mock(Logger) { isDebugEnabled() >> true } - def operationContext = new OperationContext() + def operationContext = OPERATION_CONTEXT - def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, null, null, - operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) + def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, null, operationContext, + message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) when: sender.sendStartedEvent() @@ -191,14 +195,14 @@ class LoggingCommandEventSenderSpecification extends Specification { def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, SINGLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - message.encode(bsonOutput, NoOpSessionContext.INSTANCE) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) def logger = Mock(Logger) { isDebugEnabled() >> true } - def operationContext = new OperationContext() + def operationContext = OPERATION_CONTEXT def sender = new LoggingCommandEventSender(['createUser'] as Set, [] as Set, connectionDescription, null, - IgnorableRequestContext.INSTANCE, operationContext, message, bsonOutput, new StructuredLogger(logger), - LoggerSettings.builder().build()) + operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) when: sender.sendStartedEvent() diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy index f14305bb6b8..e0f932f4963 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.connection - import com.mongodb.ServerAddress import com.mongodb.connection.ClusterDescription import com.mongodb.connection.ClusterId @@ -29,6 +28,7 @@ import 
com.mongodb.internal.selector.WritableServerSelector import org.bson.types.ObjectId import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterType.REPLICA_SET import static com.mongodb.connection.ClusterType.SHARDED @@ -94,7 +94,9 @@ class MultiServerClusterSpecification extends Specification { cluster.close() when: - cluster.getServersSnapshot() + cluster.getServersSnapshot( + OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(), + OPERATION_CONTEXT.getTimeoutContext()) then: thrown(IllegalStateException) @@ -379,7 +381,7 @@ class MultiServerClusterSpecification extends Specification { cluster.close() when: - cluster.selectServer(new WritableServerSelector(), new OperationContext()) + cluster.selectServer(new WritableServerSelector(), OPERATION_CONTEXT) then: thrown(IllegalStateException) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java index e4a4f80289c..12d8e9fa7c3 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.ClusterFixture.getServerApi; import static com.mongodb.internal.connection.MessageHelper.getApiVersionField; import static com.mongodb.internal.connection.MessageHelper.getDbField; @@ -53,7 +54,7 @@ public void before() { public void testSuccessfulAuthentication() { enqueueSuccessfulReply(); - subject.authenticate(connection, connectionDescription); + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); validateMessages(); } @@ -63,7 +64,7 @@ public void testSuccessfulAuthenticationAsync() throws ExecutionException, Inter enqueueSuccessfulReply(); FutureResultCallback futureCallback = new FutureResultCallback<>(); - subject.authenticateAsync(connection, connectionDescription, futureCallback); + subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback); futureCallback.get(); validateMessages(); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy index 0bf71212f10..069ece30dbe 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy @@ -16,13 +16,14 @@ package com.mongodb.internal.connection - import com.mongodb.MongoCommandException import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNodeIsRecoveringException import com.mongodb.MongoNotPrimaryException +import com.mongodb.MongoOperationTimeoutException import com.mongodb.MongoQueryException import com.mongodb.ServerAddress +import com.mongodb.internal.TimeoutContext import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDouble @@ -32,6 +33,7 @@ import org.bson.BsonNull import org.bson.BsonString import spock.lang.Specification +import static com.mongodb.ClusterFixture.* import static 
com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException import static com.mongodb.internal.connection.ProtocolHelper.getQueryFailureException import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk @@ -71,18 +73,37 @@ class ProtocolHelperSpecification extends Specification { def 'command failure exception should be MongoExecutionTimeoutException if error code is 50'() { expect: getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)), - new ServerAddress()) instanceof MongoExecutionTimeoutException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) + instanceof MongoExecutionTimeoutException + } + + def 'command failure exception should be MongoOperationTimeoutException if error code is 50 and timeoutMS is set'() { + expect: + getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)), + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) + instanceof MongoOperationTimeoutException } def 'query failure exception should be MongoExecutionTimeoutException if error code is 50'() { expect: getQueryFailureException(new BsonDocument('code', new BsonInt32(50)), - new ServerAddress()) instanceof MongoExecutionTimeoutException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) + instanceof MongoExecutionTimeoutException + } + + def 'query failure exception should be MongoOperationTimeoutException if error code is 50'() { + expect: + def exception = getQueryFailureException(new BsonDocument('code', new BsonInt32(50)), + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) + exception instanceof MongoOperationTimeoutException + exception.getCause() instanceof MongoExecutionTimeoutException + } def 'command failure exceptions should handle MongoNotPrimaryException scenarios'() { expect: - getCommandFailureException(exception, new ServerAddress()) instanceof MongoNotPrimaryException + getCommandFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) + instanceof MongoNotPrimaryException where: exception << [ @@ -94,7 +115,8 @@ class ProtocolHelperSpecification extends Specification { def 'query failure exceptions should handle MongoNotPrimaryException scenarios'() { expect: - getQueryFailureException(exception, new ServerAddress()) instanceof MongoNotPrimaryException + getQueryFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) + instanceof MongoNotPrimaryException where: exception << [ @@ -106,7 +128,8 @@ class ProtocolHelperSpecification extends Specification { def 'command failure exceptions should handle MongoNodeIsRecoveringException scenarios'() { expect: - getCommandFailureException(exception, new ServerAddress()) instanceof MongoNodeIsRecoveringException + getCommandFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) + instanceof MongoNodeIsRecoveringException where: exception << [ @@ -121,7 +144,8 @@ class ProtocolHelperSpecification extends Specification { def 'query failure exceptions should handle MongoNodeIsRecoveringException scenarios'() { expect: - getQueryFailureException(exception, new ServerAddress()) instanceof MongoNodeIsRecoveringException + getQueryFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) + instanceof MongoNodeIsRecoveringException where: exception << [ @@ -137,13 +161,13 @@ class ProtocolHelperSpecification extends Specification { def 
'command failure exception should be MongoCommandException'() { expect: getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('errmsg', new BsonString('some other problem')), - new ServerAddress()) instanceof MongoCommandException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoCommandException } def 'query failure exception should be MongoQueryException'() { expect: getQueryFailureException(new BsonDocument('$err', new BsonString('some other problem')), - new ServerAddress()) instanceof MongoQueryException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoQueryException } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java new file mode 100644 index 00000000000..b44afb7a725 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; + + +public class RoundTripTimeSamplerTest { + + @ParameterizedTest(name = "{index}: samples: {0}. 
Expected: average: {1} min: {2}") + @DisplayName("RoundTripTimeSampler should calculate the expected average and min round trip times") + @MethodSource + public void testRoundTripTimeSampler(final List samples, final int expectedAverageRTT, final int expectedMinRTT) { + RoundTripTimeSampler sampler = new RoundTripTimeSampler(); + samples.forEach(sampler::addSample); + + assertEquals(expectedMinRTT, sampler.getMin()); + assertEquals(expectedAverageRTT, sampler.getAverage()); + } + + private static Stream testRoundTripTimeSampler() { + return Stream.of( + Arguments.of(emptyList(), 0, 0), + Arguments.of(singletonList(10), 10, 0), + Arguments.of(asList(10, 20), 12, 10), + Arguments.of(asList(10, 20, 8), 11, 8), + Arguments.of(asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), 11, 6) + ); + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy index 32295d12b7c..21f9bc28161 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy @@ -23,6 +23,7 @@ import com.mongodb.connection.ClusterId import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ServerId import com.mongodb.connection.ServerType +import com.mongodb.internal.TimeoutSettings import org.bson.BsonDocument import spock.lang.Specification @@ -34,11 +35,13 @@ import static com.mongodb.MongoCredential.createScramSha1Credential import static com.mongodb.MongoCredential.createScramSha256Credential import static com.mongodb.connection.ClusterConnectionMode.SINGLE import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply +import static com.mongodb.internal.connection.OperationContext.simpleOperationContext import static org.junit.Assert.assertEquals class ScramShaAuthenticatorSpecification extends Specification { def serverId = new ServerId(new ClusterId(), new ServerAddress('localhost', 27017)) def connectionDescription = new ConnectionDescription(serverId) + def operationContext = simpleOperationContext(TimeoutSettings.DEFAULT, null) private final static MongoCredentialWithCache SHA1_CREDENTIAL = new MongoCredentialWithCache(createScramSha1Credential('user', 'database', 'pencil' as char[])) private final static MongoCredentialWithCache SHA256_CREDENTIAL = @@ -522,10 +525,10 @@ class ScramShaAuthenticatorSpecification extends Specification { def authenticate(TestInternalConnection connection, ScramShaAuthenticator authenticator, boolean async) { if (async) { FutureResultCallback futureCallback = new FutureResultCallback() - authenticator.authenticateAsync(connection, connectionDescription, futureCallback) + authenticator.authenticateAsync(connection, connectionDescription, operationContext, futureCallback) futureCallback.get(5, TimeUnit.SECONDS) } else { - authenticator.authenticate(connection, connectionDescription) + authenticator.authenticate(connection, connectionDescription, operationContext) } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java index 816bca3f3f9..f1c8f69eb29 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java +++ 
b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java @@ -32,6 +32,8 @@ import java.util.List; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; import static org.junit.jupiter.api.Assertions.assertAll; @@ -51,7 +53,7 @@ final class ServerDeprioritizationTest { @BeforeEach void beforeEach() { - serverDeprioritization = new OperationContext().getServerDeprioritization(); + serverDeprioritization = createOperationContext(TIMEOUT_SETTINGS).getServerDeprioritization(); } @Test diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java index 2bc41fee1be..4a2e94c19a5 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java @@ -19,6 +19,7 @@ import com.mongodb.ServerAddress; import com.mongodb.connection.ClusterType; import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.time.Timeout; import org.bson.BsonDocument; import org.bson.BsonNull; import org.bson.BsonValue; @@ -30,6 +31,7 @@ import java.net.URISyntaxException; import java.util.Collection; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.ClusterFixture.getClusterDescription; import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; import static com.mongodb.internal.event.EventListenerHelper.NO_OP_CLUSTER_LISTENER; @@ -120,7 +122,10 @@ private void assertServer(final String serverName, final BsonDocument expectedSe if (expectedServerDescriptionDocument.isDocument("pool")) { int expectedGeneration = expectedServerDescriptionDocument.getDocument("pool").getNumber("generation").intValue(); - DefaultServer server = (DefaultServer) getCluster().getServersSnapshot().getServer(new ServerAddress(serverName)); + Timeout serverSelectionTimeout = OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(); + DefaultServer server = (DefaultServer) getCluster() + .getServersSnapshot(serverSelectionTimeout, OPERATION_CONTEXT.getTimeoutContext()) + .getServer(new ServerAddress(serverName)); assertEquals(expectedGeneration, server.getConnectionPool().getGeneration()); } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java index 5b68d7f84bb..9a7a8492563 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java @@ -43,7 +43,7 @@ public ServerSelectionRttTest(final String description, final BsonDocument defin @Test public void shouldPassAllOutcomes() { - ExponentiallyWeightedMovingAverage subject = new ExponentiallyWeightedMovingAverage(0.2); + RoundTripTimeSampler subject = new RoundTripTimeSampler(); BsonValue current = definition.get("avg_rtt_ms"); if (current.isNumber()) { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java index 
6f1a9d25bb1..878876d74bd 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java @@ -40,6 +40,8 @@ import java.util.Map; import java.util.stream.IntStream; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; import static com.mongodb.connection.ServerSelectionSelectionTest.buildClusterDescription; import static java.util.stream.Collectors.groupingBy; import static java.util.stream.Collectors.toList; @@ -74,7 +76,8 @@ public ServerSelectionWithinLatencyWindowTest( @Test public void shouldPassAllOutcomes() { ServerSelector selector = new ReadPreferenceServerSelector(ReadPreference.nearest()); - OperationContext.ServerDeprioritization emptyServerDeprioritization = new OperationContext().getServerDeprioritization(); + OperationContext.ServerDeprioritization emptyServerDeprioritization = createOperationContext(TIMEOUT_SETTINGS) + .getServerDeprioritization(); ClusterSettings defaultClusterSettings = ClusterSettings.builder().build(); Map> selectionResultsGroupedByServerAddress = IntStream.range(0, iterations) .mapToObj(i -> BaseCluster.createCompleteSelectorAndSelectServer(selector, clusterDescription, serversSnapshot, diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy index a3a0f6a2d6f..3ebd5c4eb0f 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy @@ -28,6 +28,7 @@ import com.mongodb.event.ClusterListener import com.mongodb.internal.selector.WritableServerSelector import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.connection.ClusterConnectionMode.SINGLE import static com.mongodb.connection.ClusterType.REPLICA_SET import static com.mongodb.connection.ClusterType.UNKNOWN @@ -76,7 +77,10 @@ class SingleServerClusterSpecification extends Specification { sendNotification(firstServer, STANDALONE) then: - cluster.getServersSnapshot().getServer(firstServer) == factory.getServer(firstServer) + cluster.getServersSnapshot(OPERATION_CONTEXT + .getTimeoutContext() + .computeServerSelectionTimeout(), + OPERATION_CONTEXT.getTimeoutContext()).getServer(firstServer) == factory.getServer(firstServer) cleanup: cluster?.close() @@ -90,7 +94,8 @@ class SingleServerClusterSpecification extends Specification { cluster.close() when: - cluster.getServersSnapshot() + cluster.getServersSnapshot(OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(), + OPERATION_CONTEXT.getTimeoutContext()) then: thrown(IllegalStateException) @@ -140,7 +145,7 @@ class SingleServerClusterSpecification extends Specification { sendNotification(firstServer, getBuilder(firstServer).minWireVersion(1000).maxWireVersion(1000).build()) when: - cluster.selectServer(new WritableServerSelector(), new OperationContext()) + cluster.selectServer(new WritableServerSelector(), OPERATION_CONTEXT) then: thrown(MongoIncompatibleDriverException) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy index 
7745d9580ff..855951d425a 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy @@ -16,9 +16,12 @@ package com.mongodb.internal.connection +import com.mongodb.ClusterFixture import com.mongodb.MongoNamespace import com.mongodb.ReadPreference import com.mongodb.async.FutureResultCallback +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonBinaryWriter import org.bson.BsonDocument @@ -168,7 +171,10 @@ class StreamHelper { new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new NoOpFieldNameValidator(), ReadPreference.primary(), MessageSettings.builder().build(), SINGLE, null) OutputBuffer outputBuffer = new BasicOutputBuffer() - command.encode(outputBuffer, NoOpSessionContext.INSTANCE) + command.encode(outputBuffer, new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + new TimeoutContext(ClusterFixture.TIMEOUT_SETTINGS), null)) nextMessageId++ [outputBuffer.byteBuffers, nextMessageId] } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java index d9491d79c4b..7811cdec815 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java @@ -19,8 +19,6 @@ import com.mongodb.ReadPreference; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.binding.BindingContext; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.FieldNameValidator; @@ -59,31 +57,32 @@ public ConnectionDescription getDescription() { @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, - final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context) { - return executeEnqueuedCommandBasedProtocol(context.getSessionContext()); + final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext) { + return executeEnqueuedCommandBasedProtocol(operationContext); } @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, - final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, + final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { - return executeEnqueuedCommandBasedProtocol(context.getSessionContext()); + return executeEnqueuedCommandBasedProtocol(operationContext); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, - final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, + final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final SingleResultCallback callback) { - 
executeEnqueuedCommandBasedProtocolAsync(context.getSessionContext(), callback); + executeEnqueuedCommandBasedProtocolAsync(operationContext, callback); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, - final ReadPreference readPreference, final Decoder commandResultDecoder, final BindingContext context, + final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { - executeEnqueuedCommandBasedProtocolAsync(context.getSessionContext(), callback); + executeEnqueuedCommandBasedProtocolAsync(operationContext, callback); } @Override @@ -92,13 +91,14 @@ public void markAsPinned(final PinningMode pinningMode) { } @SuppressWarnings("unchecked") - private T executeEnqueuedCommandBasedProtocol(final SessionContext sessionContext) { - return (T) executor.execute(enqueuedCommandProtocol, internalConnection, sessionContext); + private T executeEnqueuedCommandBasedProtocol(final OperationContext operationContext) { + return (T) executor.execute(enqueuedCommandProtocol, internalConnection, operationContext.getSessionContext()); } @SuppressWarnings("unchecked") - private void executeEnqueuedCommandBasedProtocolAsync(final SessionContext sessionContext, final SingleResultCallback callback) { - executor.executeAsync(enqueuedCommandProtocol, internalConnection, sessionContext, callback); + private void executeEnqueuedCommandBasedProtocolAsync(final OperationContext operationContext, + final SingleResultCallback callback) { + executor.executeAsync(enqueuedCommandProtocol, internalConnection, operationContext.getSessionContext(), callback); } void enqueueProtocol(final CommandProtocol protocol) { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java index 479ae6ed921..008ae7bf7b7 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java @@ -17,18 +17,15 @@ package com.mongodb.internal.connection; import com.mongodb.MongoException; -import com.mongodb.RequestContext; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.ByteBuf; import org.bson.codecs.Decoder; import org.bson.types.ObjectId; import java.util.List; -import java.util.concurrent.TimeUnit; public class TestConnectionPool implements ConnectionPool { @@ -48,23 +45,22 @@ public ByteBuf getBuffer(final int capacity) { } @Override - public void sendMessage(final List byteBuffers, final int lastRequestId) { + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { throw new UnsupportedOperationException("Not implemented yet!"); } @Override - public T sendAndReceive(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext) { + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final 
OperationContext operationContext) { throw new UnsupportedOperationException("Not implemented yet!"); } @Override - public void send(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext) { + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { throw new UnsupportedOperationException(); } @Override - public T receive(final Decoder decoder, final SessionContext sessionContext) { + public T receive(final Decoder decoder, final OperationContext operationContext) { throw new UnsupportedOperationException(); } @@ -75,24 +71,24 @@ public boolean hasMoreToCome() { @Override public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, - final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { throw new UnsupportedOperationException("Not implemented yet!"); } @Override - public ResponseBuffers receiveMessage(final int responseTo) { + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { throw new UnsupportedOperationException("Not implemented yet!"); } @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final OperationContext operationContext, final SingleResultCallback callback) { throw new UnsupportedOperationException("Not implemented yet!"); } @Override - public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { throw new UnsupportedOperationException("Not implemented yet!"); } @@ -107,12 +103,12 @@ public ServerDescription getInitialServerDescription() { } @Override - public void open() { + public void open(final OperationContext operationContext) { throw new UnsupportedOperationException("Not implemented yet"); } @Override - public void openAsync(final SingleResultCallback callback) { + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { callback.onResult(null, new UnsupportedOperationException("Not implemented yet")); } @@ -138,20 +134,12 @@ public int getGeneration() { }; } - @Override - public InternalConnection get(final OperationContext operationContext, final long timeout, final TimeUnit timeUnit) { - if (exceptionToThrow != null) { - throw exceptionToThrow; - } - return get(operationContext); - } - @Override public void getAsync(final OperationContext operationContext, final SingleResultCallback callback) { if (exceptionToThrow != null) { callback.onResult(null, exceptionToThrow); } else { - callback.onResult(get(new OperationContext()), null); + callback.onResult(get(operationContext), null); } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java index 9d8eda976d6..12008cdec93 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java @@ -28,6 +28,9 @@ import com.mongodb.event.ConnectionPoolListener; import 
com.mongodb.event.ConnectionPoolReadyEvent; import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.internal.time.StartTime; +import com.mongodb.internal.time.TimePointTest; +import com.mongodb.internal.time.Timeout; import java.util.ArrayList; import java.util.Arrays; @@ -84,6 +87,22 @@ public int countEvents(final Class eventClass) { return eventCount; } + public void waitForEvents(final List> eventClasses, final long time, final TimeUnit unit) + throws InterruptedException, TimeoutException { + Timeout timeout = StartTime.now().timeoutAfterOrInfiniteIfNegative(time, unit); + ArrayList seen = new ArrayList<>(); + + for (Class eventClass : eventClasses) { + waitForEvent(eventClass, 1, TimePointTest.remaining(timeout, unit), unit); + + if (TimePointTest.hasExpired(timeout)) { + throw new TimeoutException("Timed out waiting for event of type " + eventClass + + ". Timing out after seeing " + seen); + } + seen.add(eventClass); + } + } + public void waitForEvent(final Class eventClass, final int count, final long time, final TimeUnit unit) throws InterruptedException, TimeoutException { lock.lock(); @@ -106,6 +125,7 @@ public void waitForEvent(final Class eventClass, final int count, final l } } + private boolean containsEvent(final Class eventClass, final int expectedEventCount) { return countEvents(eventClass) >= expectedEventCount; } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java index 8e99c89c20d..2853780f93a 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java @@ -17,14 +17,14 @@ package com.mongodb.internal.connection; import com.mongodb.MongoException; -import com.mongodb.RequestContext; +import com.mongodb.ServerAddress; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerConnectionState; import com.mongodb.connection.ServerDescription; import com.mongodb.connection.ServerId; import com.mongodb.connection.ServerType; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.session.SessionContext; import org.bson.BsonBinaryReader; import org.bson.BsonDocument; import org.bson.ByteBuf; @@ -55,6 +55,7 @@ private static class Interaction { } private final ConnectionDescription description; + private final ServerDescription serverDescription; private final BufferProvider bufferProvider; private final Deque replies; private final List sent; @@ -68,6 +69,10 @@ private static class Interaction { TestInternalConnection(final ServerId serverId, final ServerType serverType) { this.description = new ConnectionDescription(new ConnectionId(serverId), LATEST_WIRE_VERSION, serverType, 0, 0, 0, Collections.emptyList()); + this.serverDescription = ServerDescription.builder() + .address(new ServerAddress("localhost", 27017)) + .type(serverType) + .state(ServerConnectionState.CONNECTED).build(); this.bufferProvider = new SimpleBufferProvider(); this.replies = new LinkedList<>(); @@ -103,15 +108,15 @@ public ConnectionDescription getDescription() { @Override public ServerDescription getInitialServerDescription() { - throw new UnsupportedOperationException(); + return serverDescription; } - public void open() { + public void open(final OperationContext operationContext) { opened = true; } @Override - public void 
openAsync(final SingleResultCallback callback) { + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { opened = true; callback.onResult(null, null); } @@ -137,7 +142,7 @@ public int getGeneration() { } @Override - public void sendMessage(final List byteBuffers, final int lastRequestId) { + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { // repackage all byte buffers into a single byte buffer... int totalSize = 0; for (ByteBuf buf : byteBuffers) { @@ -164,30 +169,29 @@ public void sendMessage(final List byteBuffers, final int lastRequestId } @Override - public T sendAndReceive(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext) { + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) { - message.encode(bsonOutput, sessionContext); - sendMessage(bsonOutput.getByteBuffers(), message.getId()); + message.encode(bsonOutput, operationContext); + sendMessage(bsonOutput.getByteBuffers(), message.getId(), operationContext); } - try (ResponseBuffers responseBuffers = receiveMessage(message.getId())) { + try (ResponseBuffers responseBuffers = receiveMessage(message.getId(), operationContext)) { boolean commandOk = isCommandOk(new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer()))); responseBuffers.reset(); if (!commandOk) { throw getCommandFailureException(getResponseDocument(responseBuffers, message, new BsonDocumentCodec()), - description.getServerAddress()); + description.getServerAddress(), operationContext.getTimeoutContext()); } return new ReplyMessage<>(responseBuffers, decoder, message.getId()).getDocument(); } } @Override - public void send(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext) { + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { throw new UnsupportedOperationException(); } @Override - public T receive(final Decoder decoder, final SessionContext sessionContext) { + public T receive(final Decoder decoder, final OperationContext operationContext) { throw new UnsupportedOperationException(); } @@ -204,11 +208,10 @@ private T getResponseDocument(final ResponseBuffers res } @Override - public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, - final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext, + public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, final OperationContext operationContext, final SingleResultCallback callback) { try { - T result = sendAndReceive(message, decoder, sessionContext, requestContext, operationContext); + T result = sendAndReceive(message, decoder, operationContext); callback.onResult(result, null); } catch (MongoException ex) { callback.onResult(null, ex); @@ -233,7 +236,7 @@ private ReplyHeader replaceResponseTo(final ReplyHeader header, final int respon return new ReplyHeader(buffer, messageHeader); } @Override - public ResponseBuffers receiveMessage(final int responseTo) { + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { if (this.replies.isEmpty()) { throw new MongoException("Test was 
not setup properly as too many calls to receiveMessage occured."); } @@ -247,9 +250,10 @@ public ResponseBuffers receiveMessage(final int responseTo) { } @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final SingleResultCallback callback) { + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback callback) { try { - sendMessage(byteBuffers, lastRequestId); + sendMessage(byteBuffers, lastRequestId, operationContext); callback.onResult(null, null); } catch (Exception e) { callback.onResult(null, e); @@ -257,9 +261,10 @@ public void sendMessageAsync(final List byteBuffers, final int lastRequ } @Override - public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { try { - ResponseBuffers buffers = receiveMessage(responseTo); + ResponseBuffers buffers = receiveMessage(responseTo, operationContext); callback.onResult(buffers, null); } catch (MongoException ex) { callback.onResult(null, ex); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java index 0e53c55fc03..7669eab9b91 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java @@ -16,14 +16,12 @@ package com.mongodb.internal.connection; -import com.mongodb.RequestContext; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ConnectionId; import com.mongodb.connection.ServerDescription; import com.mongodb.connection.ServerId; import com.mongodb.connection.ServerType; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.session.SessionContext; import org.bson.ByteBuf; import org.bson.codecs.Decoder; @@ -69,12 +67,12 @@ public int getGeneration() { return generation; } - public void open() { + public void open(final OperationContext operationContext) { opened = true; } @Override - public void openAsync(final SingleResultCallback callback) { + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { opened = true; callback.onResult(null, null); } @@ -100,21 +98,20 @@ public ByteBuf getBuffer(final int size) { } @Override - public void sendMessage(final List byteBuffers, final int lastRequestId) { + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { } @Override - public T sendAndReceive(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext, - final RequestContext requestContext, final OperationContext operationContext) { + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { return null; } @Override - public void send(final CommandMessage message, final Decoder decoder, final SessionContext sessionContext) { + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { } @Override - public T receive(final Decoder decoder, final SessionContext sessionContext) { + public T receive(final Decoder decoder, final OperationContext 
operationContext) { return null; } @@ -125,23 +122,24 @@ public boolean hasMoreToCome() { @Override public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, - final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext, - final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { callback.onResult(null, null); } @Override - public ResponseBuffers receiveMessage(final int responseTo) { + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { return null; } @Override - public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final SingleResultCallback callback) { + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback callback) { callback.onResult(null, null); } @Override - public void receiveMessageAsync(final int responseTo, final SingleResultCallback callback) { + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { callback.onResult(null, null); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java index 970bfd42ff1..6fd27893c70 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java @@ -17,17 +17,22 @@ package com.mongodb.internal.connection; import com.mongodb.MongoTimeoutException; +import com.mongodb.internal.TimeoutSettings; import java.util.concurrent.CountDownLatch; +import static com.mongodb.ClusterFixture.createOperationContext; + class TimeoutTrackingConnectionGetter implements Runnable { private final ConnectionPool connectionPool; + private final TimeoutSettings timeoutSettings; private final CountDownLatch latch = new CountDownLatch(1); private volatile boolean gotTimeout; - TimeoutTrackingConnectionGetter(final ConnectionPool connectionPool) { + TimeoutTrackingConnectionGetter(final ConnectionPool connectionPool, final TimeoutSettings timeoutSettings) { this.connectionPool = connectionPool; + this.timeoutSettings = timeoutSettings; } boolean isGotTimeout() { @@ -37,7 +42,7 @@ boolean isGotTimeout() { @Override public void run() { try { - InternalConnection connection = connectionPool.get(new OperationContext()); + InternalConnection connection = connectionPool.get(createOperationContext(timeoutSettings)); connection.close(); } catch (MongoTimeoutException e) { gotTimeout = true; diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy index 8eb75a44d2f..d2e5414bd56 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy @@ -16,19 +16,18 @@ package com.mongodb.internal.connection - import com.mongodb.MongoNamespace import com.mongodb.ServerAddress import com.mongodb.async.FutureResultCallback import com.mongodb.connection.ClusterId import com.mongodb.connection.ServerId 
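The TestConnection, TestConnectionPool, TestInternalConnection and UsageTrackingConnection hunks around this point all make the same mechanical change: the separate SessionContext, RequestContext and per-call timeout arguments collapse into the single OperationContext that every connection-level call now accepts, with ClusterFixture.OPERATION_CONTEXT as the shared test fixture. A minimal sketch of the resulting call shape, assuming the internal signatures exactly as they appear in these hunks; the sketch class and method names are illustrative only:

    import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;

    import com.mongodb.internal.connection.CommandMessage;      // internal types as used in these tests
    import com.mongodb.internal.connection.InternalConnection;
    import org.bson.BsonDocument;
    import org.bson.codecs.BsonDocumentCodec;

    final class OperationContextRoundTripSketch {
        // One OperationContext now flows through every step of a round trip on an InternalConnection.
        static BsonDocument roundTrip(final InternalConnection connection, final CommandMessage message) {
            connection.open(OPERATION_CONTEXT);                  // was open()
            return connection.sendAndReceive(message, new BsonDocumentCodec(),
                    OPERATION_CONTEXT);                          // was (..., sessionContext, requestContext, operationContext)
        }
    }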
-import com.mongodb.internal.IgnorableRequestContext import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.codecs.BsonDocumentCodec import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ReadPreference.primary import static com.mongodb.connection.ClusterConnectionMode.SINGLE @@ -51,7 +50,7 @@ class UsageTrackingConnectionSpecification extends Specification { connection.openedAt == Long.MAX_VALUE when: - connection.open() + connection.open(OPERATION_CONTEXT) then: connection.openedAt <= System.currentTimeMillis() @@ -67,7 +66,7 @@ class UsageTrackingConnectionSpecification extends Specification { connection.openedAt == Long.MAX_VALUE when: - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: @@ -82,7 +81,7 @@ class UsageTrackingConnectionSpecification extends Specification { connection.lastUsedAt == Long.MAX_VALUE when: - connection.open() + connection.open(OPERATION_CONTEXT) then: connection.lastUsedAt <= System.currentTimeMillis() @@ -98,7 +97,7 @@ class UsageTrackingConnectionSpecification extends Specification { connection.lastUsedAt == Long.MAX_VALUE when: - connection.openAsync(futureResultCallback) + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: @@ -108,11 +107,11 @@ class UsageTrackingConnectionSpecification extends Specification { def 'lastUsedAt should be set on sendMessage'() { given: def connection = createConnection() - connection.open() + connection.open(OPERATION_CONTEXT) def openedLastUsedAt = connection.lastUsedAt when: - connection.sendMessage(Arrays.asList(), 1) + connection.sendMessage([], 1, OPERATION_CONTEXT) then: connection.lastUsedAt >= openedLastUsedAt @@ -123,12 +122,12 @@ class UsageTrackingConnectionSpecification extends Specification { def 'lastUsedAt should be set on sendMessage asynchronously'() { given: def connection = createConnection() - connection.open() + connection.open(OPERATION_CONTEXT) def openedLastUsedAt = connection.lastUsedAt def futureResultCallback = new FutureResultCallback() when: - connection.sendMessageAsync(Arrays.asList(), 1, futureResultCallback) + connection.sendMessageAsync([], 1, OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: @@ -139,10 +138,10 @@ class UsageTrackingConnectionSpecification extends Specification { def 'lastUsedAt should be set on receiveMessage'() { given: def connection = createConnection() - connection.open() + connection.open(OPERATION_CONTEXT) def openedLastUsedAt = connection.lastUsedAt when: - connection.receiveMessage(1) + connection.receiveMessage(1, OPERATION_CONTEXT) then: connection.lastUsedAt >= openedLastUsedAt @@ -152,12 +151,12 @@ class UsageTrackingConnectionSpecification extends Specification { def 'lastUsedAt should be set on receiveMessage asynchronously'() { given: def connection = createConnection() - connection.open() + connection.open(OPERATION_CONTEXT) def openedLastUsedAt = connection.lastUsedAt def futureResultCallback = new FutureResultCallback() when: - connection.receiveMessageAsync(1, futureResultCallback) + connection.receiveMessageAsync(1, OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: @@ -168,14 +167,13 @@ class UsageTrackingConnectionSpecification extends Specification { def 'lastUsedAt should be set on sendAndReceive'() { given: def connection = 
createConnection() - connection.open() + connection.open(OPERATION_CONTEXT) def openedLastUsedAt = connection.lastUsedAt when: connection.sendAndReceive(new CommandMessage(new MongoNamespace('test.coll'), new BsonDocument('ping', new BsonInt32(1)), new NoOpFieldNameValidator(), primary(), - MessageSettings.builder().build(), SINGLE, null), - new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext()) + MessageSettings.builder().build(), SINGLE, null), new BsonDocumentCodec(), OPERATION_CONTEXT) then: connection.lastUsedAt >= openedLastUsedAt @@ -185,7 +183,7 @@ class UsageTrackingConnectionSpecification extends Specification { def 'lastUsedAt should be set on sendAndReceive asynchronously'() { given: def connection = createConnection() - connection.open() + connection.open(OPERATION_CONTEXT) def openedLastUsedAt = connection.lastUsedAt def futureResultCallback = new FutureResultCallback() @@ -193,8 +191,7 @@ class UsageTrackingConnectionSpecification extends Specification { connection.sendAndReceiveAsync(new CommandMessage(new MongoNamespace('test.coll'), new BsonDocument('ping', new BsonInt32(1)), new NoOpFieldNameValidator(), primary(), MessageSettings.builder().build(), SINGLE, null), - new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(), - futureResultCallback) + new BsonDocumentCodec(), OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() then: diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java index e2ea7939880..5326c8c723d 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.ClusterFixture.getServerApi; import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply; @@ -56,7 +57,8 @@ public void before() { public void testSuccessfulAuthentication() { enqueueSuccessfulAuthenticationReply(); - new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()).authenticate(connection, connectionDescriptionThreeSix); + new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()) + .authenticate(connection, connectionDescriptionThreeSix, OPERATION_CONTEXT); validateMessages(); } @@ -67,7 +69,7 @@ public void testSuccessfulAuthenticationAsync() throws ExecutionException, Inter FutureResultCallback futureCallback = new FutureResultCallback<>(); new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()).authenticateAsync(connection, - connectionDescriptionThreeSix, futureCallback); + connectionDescriptionThreeSix, OPERATION_CONTEXT, futureCallback); futureCallback.get(); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java index 92ff72fde83..a8b2d7b71d5 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java +++ 
b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java @@ -30,8 +30,8 @@ import org.junit.Test; import java.util.List; -import java.util.concurrent.ExecutionException; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.ClusterFixture.getServerApi; import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply; import static com.mongodb.internal.connection.MessageHelper.getApiVersionField; @@ -58,7 +58,7 @@ public void testFailedAuthentication() { enqueueFailedAuthenticationReply(); try { - subject.authenticate(connection, connectionDescription); + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); fail(); } catch (MongoSecurityException e) { // all good @@ -70,7 +70,7 @@ public void testFailedAuthenticationAsync() { enqueueFailedAuthenticationReply(); FutureResultCallback futureCallback = new FutureResultCallback<>(); - subject.authenticateAsync(connection, connectionDescription, futureCallback); + subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback); try { futureCallback.get(); @@ -92,17 +92,17 @@ private void enqueueFailedAuthenticationReply() { public void testSuccessfulAuthentication() { enqueueSuccessfulAuthenticationReply(); - subject.authenticate(connection, connectionDescription); + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); validateMessages(); } @Test - public void testSuccessfulAuthenticationAsync() throws ExecutionException, InterruptedException { + public void testSuccessfulAuthenticationAsync() { enqueueSuccessfulAuthenticationReply(); FutureResultCallback futureCallback = new FutureResultCallback<>(); - subject.authenticateAsync(connection, connectionDescription, futureCallback); + subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback); futureCallback.get(); @@ -117,7 +117,7 @@ public void testSpeculativeAuthentication() { + "user: \"CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US\", " + "mechanism: \"MONGODB-X509\", db: \"$external\"}"); subject.setSpeculativeAuthenticateResponse(BsonDocument.parse(speculativeAuthenticateResponse)); - subject.authenticate(connection, connectionDescription); + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); assertEquals(connection.getSent().size(), 0); assertEquals(expectedSpeculativeAuthenticateCommand, subject.createSpeculativeAuthenticateCommand(connection)); diff --git a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java index a5044ee8ccf..40d33c31288 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java @@ -16,7 +16,6 @@ package com.mongodb.internal.mockito; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.ListCollectionsOperation; import org.bson.BsonDocument; import org.bson.codecs.BsonDocumentCodec; @@ -25,6 +24,7 @@ import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.ThrowsException; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.when; @@ 
-60,13 +60,13 @@ void mockObjectWithInsufficientStubbingDetector() { void stubbingWithThrowsException() { ReadBinding binding = Mockito.mock(ReadBinding.class, new ThrowsException(new AssertionError("Unfortunately, you cannot do stubbing"))); - assertThrows(AssertionError.class, () -> when(binding.getOperationContext()).thenReturn(new OperationContext())); + assertThrows(AssertionError.class, () -> when(binding.getOperationContext()).thenReturn(OPERATION_CONTEXT)); } @Test void stubbingWithInsufficientStubbingDetector() { MongoMockito.mock(ReadBinding.class, bindingMock -> - when(bindingMock.getOperationContext()).thenReturn(new OperationContext()) + when(bindingMock.getOperationContext()).thenReturn(OPERATION_CONTEXT) ); } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy index 4381e54f2e5..998c0a28b6e 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy @@ -18,8 +18,10 @@ package com.mongodb.internal.operation import com.mongodb.MongoException import com.mongodb.async.FutureResultCallback +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.async.SingleResultCallback import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.connection.OperationContext import org.bson.Document import spock.lang.Specification @@ -31,6 +33,12 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { given: def changeStreamOpertation = Stub(ChangeStreamOperation) def binding = Mock(AsyncReadBinding) + def operationContext = Mock(OperationContext) + def timeoutContext = Mock(TimeoutContext) + binding.getOperationContext() >> operationContext + operationContext.getTimeoutContext() >> timeoutContext + timeoutContext.hasTimeoutMS() >> hasTimeoutMS + def wrapped = Mock(AsyncCommandBatchCursor) def callback = Stub(SingleResultCallback) def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, @@ -61,11 +69,19 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { then: 0 * wrapped.close() 0 * binding.release() + + where: + hasTimeoutMS << [true, false] } def 'should not close the cursor in next if the cursor was closed before next completed'() { def changeStreamOpertation = Stub(ChangeStreamOperation) def binding = Mock(AsyncReadBinding) + def operationContext = Mock(OperationContext) + def timeoutContext = Mock(TimeoutContext) + binding.getOperationContext() >> operationContext + operationContext.getTimeoutContext() >> timeoutContext + timeoutContext.hasTimeoutMS() >> hasTimeoutMS def wrapped = Mock(AsyncCommandBatchCursor) def callback = Stub(SingleResultCallback) def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, @@ -86,11 +102,19 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { then: cursor.isClosed() + + where: + hasTimeoutMS << [true, false] } def 'should throw a MongoException when next/tryNext is called after the cursor is closed'() { def changeStreamOpertation = Stub(ChangeStreamOperation) def binding = Mock(AsyncReadBinding) + def operationContext = Mock(OperationContext) + def timeoutContext = Mock(TimeoutContext) + binding.getOperationContext() >> operationContext + 
operationContext.getTimeoutContext() >> timeoutContext + timeoutContext.hasTimeoutMS() >> hasTimeoutMS def wrapped = Mock(AsyncCommandBatchCursor) def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) @@ -104,6 +128,9 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { then: def exception = thrown(MongoException) exception.getMessage() == 'next() called after the cursor was closed.' + + where: + hasTimeoutMS << [true, false] } List nextBatch(AsyncChangeStreamBatchCursor cursor) { diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy index 7ba7db42a01..4ea54c05ed0 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy @@ -22,14 +22,17 @@ import com.mongodb.MongoNamespace import com.mongodb.ServerAddress import com.mongodb.ServerCursor import com.mongodb.async.FutureResultCallback +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ServerConnectionState import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerType import com.mongodb.connection.ServerVersion +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.async.SingleResultCallback import com.mongodb.internal.binding.AsyncConnectionSource import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.OperationContext import org.bson.BsonArray import org.bson.BsonDocument import org.bson.BsonInt32 @@ -51,19 +54,22 @@ class AsyncCommandBatchCursorSpecification extends Specification { def initialConnection = referenceCountedAsyncConnection() def connection = referenceCountedAsyncConnection() def connectionSource = getAsyncConnectionSource(connection) - def cursor = new AsyncCommandBatchCursor(createCommandResult([], 42), batchSize, maxTimeMS, CODEC, - null, connectionSource, initialConnection) + def timeoutContext = connectionSource.getOperationContext().getTimeoutContext() + def firstBatch = createCommandResult([]) def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID)) .append('collection', new BsonString(NAMESPACE.getCollectionName())) if (batchSize != 0) { expectedCommand.append('batchSize', new BsonInt32(batchSize)) } - if (expectedMaxTimeFieldValue != null) { - expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue)) - } def reply = getMoreResponse([], 0) + when: + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, batchSize, maxTimeMS, CODEC, + null, connectionSource, initialConnection) + then: + 1 * timeoutContext.setMaxTimeOverride(*_) + when: def batch = nextBatch(cursor) @@ -97,7 +103,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { def serverVersion = new ServerVersion([3, 6, 0]) def connection = referenceCountedAsyncConnection(serverVersion) def connectionSource = getAsyncConnectionSource(connection) - def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) when: @@ -126,7 +132,8 @@ class 
AsyncCommandBatchCursorSpecification extends Specification { def connectionSource = getAsyncConnectionSource(connection) when: - def cursor = new AsyncCommandBatchCursor(createCommandResult(FIRST_BATCH, 0), 0, 0, CODEC, + def firstBatch = createCommandResult(FIRST_BATCH, 0) + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) then: @@ -156,7 +163,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { when: def firstBatch = createCommandResult([], CURSOR_ID) - def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) def batch = nextBatch(cursor) @@ -185,8 +192,8 @@ class AsyncCommandBatchCursorSpecification extends Specification { connectionSource.getCount() == 0 where: - response | response2 - getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0) + serverVersion | response | response2 + new ServerVersion([3, 6, 0]) | getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0) } def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { @@ -199,9 +206,10 @@ class AsyncCommandBatchCursorSpecification extends Specification { def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA def secondConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionB + def firstBatch = createCommandResult() when: - def cursor = new AsyncCommandBatchCursor(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) def batch = nextBatch(cursor) @@ -255,7 +263,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { def connectionSource = getAsyncConnectionSource(connectionA, connectionB) when: - def cursor = new AsyncCommandBatchCursor(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, null, connectionSource, initialConnection) def batch = nextBatch(cursor) @@ -291,7 +299,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { def firstBatch = createCommandResult() when: - def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) def batch = nextBatch(cursor) @@ -331,7 +339,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { def initialConnection = referenceCountedAsyncConnection() def connectionSource = getAsyncConnectionSourceWithResult(ServerType.STANDALONE) { [null, MONGO_EXCEPTION] } def firstBatch = createCommandResult() - def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) when: @@ -351,7 +359,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { when: def firstBatch = createCommandResult() - def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) then: @@ -378,7 
+386,7 @@ class AsyncCommandBatchCursorSpecification extends Specification { when: def firstBatch = createCommandResult() - def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) then: @@ -511,6 +519,9 @@ class AsyncCommandBatchCursorSpecification extends Specification { .state(ServerConnectionState.CONNECTED) .build() } + OperationContext operationContext = Mock(OperationContext) + operationContext.getTimeoutContext() >> Mock(TimeoutContext) + mock.getOperationContext() >> operationContext mock.getConnection(_) >> { if (counter == 0) { throw new IllegalStateException('Tried to use released AsyncConnectionSource') diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy index f897413e12d..2e99b61efdf 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation - import com.mongodb.MongoWriteConcernException import com.mongodb.ReadConcern import com.mongodb.ReadPreference @@ -36,6 +35,7 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.Decoder import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ReadPreference.primary import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync @@ -54,7 +54,7 @@ class AsyncOperationHelperSpecification extends Specification { getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0, 0]) getServerType() >> ServerType.REPLICA_SET_PRIMARY } - def commandCreator = { serverDesc, connectionDesc -> command } + def commandCreator = { csot, serverDesc, connectionDesc -> command } def callback = new SingleResultCallback() { def result def throwable @@ -73,24 +73,26 @@ class AsyncOperationHelperSpecification extends Specification { _ * getDescription() >> connectionDescription } + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) def connectionSource = Stub(AsyncConnectionSource) { - getServerApi() >> null getConnection(_) >> { it[0].onResult(connection, null) } - _ * getServerDescription() >> serverDescription + getServerDescription() >> serverDescription + getOperationContext() >> operationContext } def asyncWriteBinding = Stub(AsyncWriteBinding) { - getServerApi() >> null getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> false - getReadConcern() >> ReadConcern.DEFAULT - } + getOperationContext() >> operationContext } when: - executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, - commandCreator, FindAndModifyHelper.asyncTransformer(), { cmd -> cmd }, callback) + executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(), + new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.asyncTransformer(), + { cmd -> cmd }, callback) 
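+        // note (descriptive comment, inferred from the stubbed results queue below): results.poll() supplies one
+        // response per attempt, so the 2 * commandAsync interaction asserted next covers the initial attempt plus a single retry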
then: 2 * connection.commandAsync(dbName, command, _, primary(), decoder, *_) >> { it.last().onResult(results.poll(), null) } @@ -107,11 +109,9 @@ class AsyncOperationHelperSpecification extends Specification { def callback = Stub(SingleResultCallback) def connection = Mock(AsyncConnection) def connectionSource = Stub(AsyncConnectionSource) { - getServerApi() >> null getConnection(_) >> { it[0].onResult(connection, null) } } def asyncWriteBinding = Stub(AsyncWriteBinding) { - getServerApi() >> null getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } def connectionDescription = Stub(ConnectionDescription) @@ -129,18 +129,18 @@ class AsyncOperationHelperSpecification extends Specification { given: def dbName = 'db' def command = new BsonDocument('fakeCommandName', BsonNull.VALUE) - def commandCreator = { serverDescription, connectionDescription -> command } + def commandCreator = { csot, serverDescription, connectionDescription -> command } def decoder = Stub(Decoder) def callback = Stub(SingleResultCallback) def function = Stub(CommandReadTransformerAsync) def connection = Mock(AsyncConnection) def connectionSource = Stub(AsyncConnectionSource) { - getServerApi() >> null + getOperationContext() >> OPERATION_CONTEXT getConnection(_) >> { it[0].onResult(connection, null) } getReadPreference() >> readPreference } def asyncReadBinding = Stub(AsyncReadBinding) { - getServerApi() >> null + getOperationContext() >> OPERATION_CONTEXT getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } def connectionDescription = Stub(ConnectionDescription) diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy index c7e1a0d4363..2ccd3513cf7 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy @@ -31,10 +31,14 @@ import com.mongodb.connection.ConnectionId import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerId import com.mongodb.connection.ServerType +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.TimeoutSettings import com.mongodb.internal.bulk.DeleteRequest import com.mongodb.internal.bulk.InsertRequest import com.mongodb.internal.bulk.UpdateRequest import com.mongodb.internal.bulk.WriteRequest +import com.mongodb.internal.connection.OperationContext import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext import org.bson.BsonDocument import org.bson.BsonInt32 @@ -45,6 +49,7 @@ import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE class BulkWriteBatchSpecification extends Specification { + private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0, 0)) def namespace = new MongoNamespace('db.coll') def serverDescription = ServerDescription.builder().address(new ServerAddress()).state(CONNECTED) .logicalSessionTimeoutMinutes(30) @@ -53,11 +58,12 @@ class BulkWriteBatchSpecification extends Specification { new ConnectionId(new ServerId(new ClusterId(), serverDescription.getAddress())), 6, ServerType.REPLICA_SET_PRIMARY, 1000, 16000, 48000, []) def sessionContext = new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT) + def 
operationContext = new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, TIMEOUT_CONTEXT, null) def 'should split payloads by type when ordered'() { when: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, - WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests(), sessionContext, null, null) + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests(), operationContext, null, null) def payload = bulkWriteBatch.getPayload() payload.setPosition(payload.size()) @@ -137,7 +143,7 @@ class BulkWriteBatchSpecification extends Specification { def 'should group payloads by type when unordered'() { when: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, - WriteConcern.MAJORITY, true, false, getWriteRequests(), sessionContext, null, null) + WriteConcern.MAJORITY, true, false, getWriteRequests(), operationContext, null, null) def payload = bulkWriteBatch.getPayload() payload.setPosition(payload.size()) @@ -189,7 +195,7 @@ class BulkWriteBatchSpecification extends Specification { def 'should split payloads if only payload partially processed'() { when: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, - WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..3], sessionContext, null, null) + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..3], operationContext, null, null) def payload = bulkWriteBatch.getPayload() payload.setPosition(1) @@ -237,7 +243,7 @@ class BulkWriteBatchSpecification extends Specification { new InsertRequest(toBsonDocument('{_id: 1}')), new InsertRequest(toBsonDocument('{_id: 2}')) ], - sessionContext, null, null) + operationContext, null, null) def payload = bulkWriteBatch.getPayload() payload.setPosition(1) payload.insertedIds.put(0, new BsonInt32(0)) @@ -278,7 +284,7 @@ class BulkWriteBatchSpecification extends Specification { new InsertRequest(toBsonDocument('{_id: 1}')), new InsertRequest(toBsonDocument('{_id: 2}')) ], - sessionContext, null, null) + operationContext, null, null) def payload = bulkWriteBatch.getPayload() payload.setPosition(3) payload.insertedIds.put(0, new BsonInt32(0)) @@ -300,7 +306,7 @@ class BulkWriteBatchSpecification extends Specification { when: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, WriteConcern.ACKNOWLEDGED, null, true, - [new DeleteRequest(new BsonDocument()).multi(true), new InsertRequest(new BsonDocument())], sessionContext, null, null) + [new DeleteRequest(new BsonDocument()).multi(true), new InsertRequest(new BsonDocument())], operationContext, null, null) then: !bulkWriteBatch.getRetryWrites() @@ -309,7 +315,7 @@ class BulkWriteBatchSpecification extends Specification { def 'should handle operation responses'() { given: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, - WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[1..1], sessionContext, null, null) + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[1..1], operationContext, null, null) def writeConcernError = toBsonDocument('{ok: 1, n: 1, upserted: [{_id: 2, index: 0}]}') when: @@ -324,7 +330,7 @@ class BulkWriteBatchSpecification extends Specification { def 'should handle writeConcernError error responses'() { given: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, - WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], 
sessionContext, null, null) + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], operationContext, null, null) def writeConcernError = toBsonDocument('{n: 1, writeConcernError: {code: 75, errmsg: "wtimeout", errInfo: {wtimeout: "0"}}}') when: @@ -340,7 +346,7 @@ class BulkWriteBatchSpecification extends Specification { def 'should handle writeErrors error responses'() { given: def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, - WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], sessionContext, null, null) + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], operationContext, null, null) def writeError = toBsonDocument('''{"ok": 0, "n": 1, "code": 65, "errmsg": "bulk op errors", "writeErrors": [{ "index" : 0, "code" : 100, "errmsg": "some error"}] }''') diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java new file mode 100644 index 00000000000..48c3a50e79a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java @@ -0,0 +1,332 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.Document; +import org.bson.RawBsonDocument; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +final class ChangeStreamBatchCursorTest { + + private static final List RESULT_FROM_NEW_CURSOR = new ArrayList<>(); + private final int maxWireVersion = ServerVersionHelper.SIX_DOT_ZERO_WIRE_VERSION; + private ServerDescription serverDescription; + private TimeoutContext timeoutContext; + private OperationContext operationContext; + private Connection connection; + private ConnectionSource connectionSource; + private ReadBinding readBinding; + private BsonDocument resumeToken; + private CommandBatchCursor commandBatchCursor; + private CommandBatchCursor newCommandBatchCursor; + private ChangeStreamBatchCursor newChangeStreamCursor; + private ChangeStreamOperation changeStreamOperation; + + @Test + @DisplayName("should return result on next") + void shouldReturnResultOnNext() { + when(commandBatchCursor.next()).thenReturn(RESULT_FROM_NEW_CURSOR); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + + //when + List next = cursor.next(); + + //then + assertEquals(RESULT_FROM_NEW_CURSOR, next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(commandBatchCursor); + verify(changeStreamOperation, times(1)).getDecoder(); + verifyNoMoreInteractions(changeStreamOperation); + } + + @Test + @DisplayName("should throw timeout exception without resume attempt on next") + void shouldThrowTimeoutExceptionWithoutResumeAttemptOnNext() { + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout")); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + assertThrows(MongoOperationTimeoutException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + 
verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(commandBatchCursor); + verifyNoResumeAttemptCalled(); + } + + @Test + @DisplayName("should perform resume attempt on next when resumable error is thrown") + void shouldPerformResumeAttemptOnNextWhenResumableErrorIsThrown() { + when(commandBatchCursor.next()).thenThrow(new MongoNotPrimaryException(new BsonDocument(), new ServerAddress())); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + List next = cursor.next(); + + //then + assertEquals(RESULT_FROM_NEW_CURSOR, next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyResumeAttemptCalled(); + verify(changeStreamOperation, times(1)).getDecoder(); + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(newCommandBatchCursor); + verifyNoMoreInteractions(changeStreamOperation); + } + + + @Test + @DisplayName("should resume only once on subsequent calls after timeout error") + void shouldResumeOnlyOnceOnSubsequentCallsAfterTimeoutError() { + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout")); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + assertThrows(MongoOperationTimeoutException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(commandBatchCursor); + verifyNoResumeAttemptCalled(); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //when second next is called. Resume is attempted. + List next = cursor.next(); + + //then + assertEquals(Collections.emptyList(), next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).close(); + verifyNoMoreInteractions(commandBatchCursor); + verify(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion); + verify(changeStreamOperation, times(1)).getDecoder(); + verify(changeStreamOperation, times(1)).execute(readBinding); + verifyNoMoreInteractions(changeStreamOperation); + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //when third next is called. No resume is attempted. 
+ List next2 = cursor.next(); + + //then + assertEquals(Collections.emptyList(), next2); + verifyNoInteractions(commandBatchCursor); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(newCommandBatchCursor); + verify(changeStreamOperation, times(1)).getDecoder(); + verifyNoMoreInteractions(changeStreamOperation); + verifyNoInteractions(readBinding); + verifyNoMoreInteractions(changeStreamOperation); + } + + @Test + @DisplayName("should propagate any error that occurs in the aggregate operation when creating a new change stream after the previous next timed out") + void shouldPropagateAnyErrorsOccurredInAggregateOperation() { + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout")); + MongoNotPrimaryException resumableError = new MongoNotPrimaryException(new BsonDocument(), new ServerAddress()); + when(changeStreamOperation.execute(readBinding)).thenThrow(resumableError); + + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + assertThrows(MongoNotPrimaryException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verifyResumeAttemptCalled(); + verifyNoMoreInteractions(changeStreamOperation); + verifyNoInteractions(newCommandBatchCursor); + } + + + @Test + @DisplayName("should perform a resume attempt in subsequent next call when previous resume attempt in next timed out") + void shouldResumeAfterTimeoutInAggregateOnNextCall() { + //given + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + + //first next operation times out on getMore + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout during next call")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //second next operation times out on resume attempt when creating change stream + when(changeStreamOperation.execute(readBinding)).thenThrow(new MongoOperationTimeoutException("timeout during resumption")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation); + + doReturn(newChangeStreamCursor).when(changeStreamOperation).execute(readBinding); + + //when the third operation succeeds in resuming and calls next + List next = cursor.next(); + + //then + assertEquals(RESULT_FROM_NEW_CURSOR, next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + + verifyResumeAttemptCalled(); + verify(changeStreamOperation, times(1)).getDecoder(); + verifyNoMoreInteractions(changeStreamOperation); + + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(newCommandBatchCursor); + } + + @Test + @DisplayName("should close change stream when resume operation fails due to non-timeout error") + void shouldCloseChangeStreamWhenResumeOperationFailsDueToNonTimeoutError() { + //given + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + + //first next operation times out on getMore + when(commandBatchCursor.next()).thenThrow(new 
MongoOperationTimeoutException("timeout during next call")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //when second next operation errors on resume attempt when creating change stream + when(changeStreamOperation.execute(readBinding)).thenThrow(new MongoNotPrimaryException(new BsonDocument(), new ServerAddress())); + assertThrows(MongoNotPrimaryException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verifyResumeAttemptCalled(); + verifyNoMoreInteractions(changeStreamOperation); + verifyNoInteractions(newCommandBatchCursor); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + + //when third next operation errors with cursor closed exception + doThrow(new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR)).when(commandBatchCursor).next(); + MongoException mongoException = assertThrows(MongoException.class, cursor::next); + + //then + assertEquals(MESSAGE_IF_CLOSED_AS_CURSOR, mongoException.getMessage()); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verifyNoResumeAttemptCalled(); + } + + private ChangeStreamBatchCursor createChangeStreamCursor() { + ChangeStreamBatchCursor cursor = + new ChangeStreamBatchCursor<>(changeStreamOperation, commandBatchCursor, readBinding, null, maxWireVersion); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + return cursor; + } + + private void verifyNoResumeAttemptCalled() { + verifyNoInteractions(changeStreamOperation); + verifyNoInteractions(newCommandBatchCursor); + verifyNoInteractions(readBinding); + } + + + private void verifyResumeAttemptCalled() { + verify(commandBatchCursor, times(1)).close(); + verify(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion); + verify(changeStreamOperation, times(1)).execute(readBinding); + verifyNoMoreInteractions(commandBatchCursor); + } + + @BeforeEach + @SuppressWarnings("unchecked") + void setUp() { + resumeToken = new BsonDocument("_id", new BsonInt32(1)); + serverDescription = mock(ServerDescription.class); + when(serverDescription.getMaxWireVersion()).thenReturn(maxWireVersion); + + timeoutContext = mock(TimeoutContext.class); + when(timeoutContext.hasTimeoutMS()).thenReturn(true); + doNothing().when(timeoutContext).resetTimeoutIfPresent(); + + operationContext = mock(OperationContext.class); + when(operationContext.getTimeoutContext()).thenReturn(timeoutContext); + connection = mock(Connection.class); + when(connection.command(any(), any(), any(), any(), any(), any())).thenReturn(null); + connectionSource = mock(ConnectionSource.class); + when(connectionSource.getConnection()).thenReturn(connection); + when(connectionSource.release()).thenReturn(1); + when(connectionSource.getServerDescription()).thenReturn(serverDescription); + + readBinding = mock(ReadBinding.class); + when(readBinding.getOperationContext()).thenReturn(operationContext); + when(readBinding.retain()).thenReturn(readBinding); + when(readBinding.release()).thenReturn(1); + when(readBinding.getReadConnectionSource()).thenReturn(connectionSource); + + + commandBatchCursor = mock(CommandBatchCursor.class); + when(commandBatchCursor.getPostBatchResumeToken()).thenReturn(resumeToken); + doNothing().when(commandBatchCursor).close(); + + newCommandBatchCursor = 
mock(CommandBatchCursor.class); + when(newCommandBatchCursor.getPostBatchResumeToken()).thenReturn(resumeToken); + when(newCommandBatchCursor.next()).thenReturn(RESULT_FROM_NEW_CURSOR); + doNothing().when(newCommandBatchCursor).close(); + + newChangeStreamCursor = mock(ChangeStreamBatchCursor.class); + when(newChangeStreamCursor.getWrapped()).thenReturn(newCommandBatchCursor); + + changeStreamOperation = mock(ChangeStreamOperation.class); + when(changeStreamOperation.getDecoder()).thenReturn(new DocumentCodec()); + doNothing().when(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion); + when(changeStreamOperation.execute(readBinding)).thenReturn(newChangeStreamCursor); + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy index 38496f02552..72e9e135b42 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy @@ -23,13 +23,16 @@ import com.mongodb.MongoSocketException import com.mongodb.MongoSocketOpenException import com.mongodb.ServerAddress import com.mongodb.ServerCursor +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ServerConnectionState import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerType import com.mongodb.connection.ServerVersion +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.binding.ConnectionSource import com.mongodb.internal.connection.Connection +import com.mongodb.internal.connection.OperationContext import org.bson.BsonArray import org.bson.BsonDocument import org.bson.BsonInt32 @@ -51,21 +54,24 @@ class CommandBatchCursorSpecification extends Specification { def initialConnection = referenceCountedConnection() def connection = referenceCountedConnection() def connectionSource = getConnectionSource(connection) + def timeoutContext = connectionSource.getOperationContext().getTimeoutContext() def firstBatch = createCommandResult([]) - def cursor = new CommandBatchCursor(firstBatch, batchSize, maxTimeMS, CODEC, - null, connectionSource, initialConnection) def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID)) .append('collection', new BsonString(NAMESPACE.getCollectionName())) if (batchSize != 0) { expectedCommand.append('batchSize', new BsonInt32(batchSize)) } - if (expectedMaxTimeFieldValue != null) { - expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue)) - } def reply = getMoreResponse([], 0) + when: + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, batchSize, maxTimeMS, CODEC, + null, connectionSource, initialConnection) + + then: + 1 * timeoutContext.setMaxTimeOverride(*_) + when: cursor.hasNext() @@ -96,7 +102,7 @@ class CommandBatchCursorSpecification extends Specification { def serverVersion = new ServerVersion([3, 6, 0]) def connection = referenceCountedConnection(serverVersion) def connectionSource = getConnectionSource(connection) - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) when: @@ -124,7 +130,7 @@ class CommandBatchCursorSpecification extends Specification { 
when: def firstBatch = createCommandResult(FIRST_BATCH, 0) - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) then: @@ -148,7 +154,7 @@ class CommandBatchCursorSpecification extends Specification { when: def firstBatch = createCommandResult([], CURSOR_ID) - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) def batch = cursor.next() @@ -202,7 +208,7 @@ class CommandBatchCursorSpecification extends Specification { def firstBatch = createCommandResult() when: - CommandBatchCursor cursor = new CommandBatchCursor<>(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) List batch = cursor.next() @@ -254,7 +260,7 @@ class CommandBatchCursorSpecification extends Specification { def connectionSource = getConnectionSource(connectionA, connectionB) when: - def cursor = new CommandBatchCursor(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + def cursor = new CommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, null, connectionSource, initialConnection) def batch = cursor.next() @@ -290,7 +296,7 @@ class CommandBatchCursorSpecification extends Specification { def firstBatch = createCommandResult() when: - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) def batch = cursor.next() @@ -329,7 +335,7 @@ class CommandBatchCursorSpecification extends Specification { def initialConnection = referenceCountedConnection() def connectionSource = getConnectionSourceWithResult(ServerType.STANDALONE) { throw MONGO_EXCEPTION } def firstBatch = createCommandResult() - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) when: @@ -350,7 +356,7 @@ class CommandBatchCursorSpecification extends Specification { when: def firstBatch = createCommandResult() - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) then: @@ -377,7 +383,7 @@ class CommandBatchCursorSpecification extends Specification { when: def firstBatch = createCommandResult() - def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, null, connectionSource, initialConnection) then: @@ -437,7 +443,7 @@ class CommandBatchCursorSpecification extends Specification { connectionSource.retain() >> connectionSource def initialResults = createCommandResult([]) - def cursor = new CommandBatchCursor(initialResults, 2, 100, new DocumentCodec(), + def cursor = new CommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, initialResults, 2, 100, CODEC, null, connectionSource, initialConnection) when: @@ -463,7 +469,7 @@ class CommandBatchCursorSpecification extends Specification { connectionSource.retain() >> connectionSource def initialResults = createCommandResult([]) - def cursor = new 
CommandBatchCursor(initialResults, 2, 100, new DocumentCodec(),
+        def cursor = new CommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, initialResults, 2, 100, CODEC,
                 null, connectionSource, initialConnection)
         when:
@@ -563,6 +569,9 @@ class CommandBatchCursorSpecification extends Specification {
                 .state(ServerConnectionState.CONNECTED)
                 .build()
         }
+        OperationContext operationContext = Mock(OperationContext)
+        operationContext.getTimeoutContext() >> Mock(TimeoutContext)
+        mock.getOperationContext() >> operationContext
         mock.getConnection() >> {
             if (counter == 0) {
                 throw new IllegalStateException('Tried to use released ConnectionSource')
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java
new file mode 100644
index 00000000000..3380785bd70
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation;
+
+
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.ServerAddress;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerType;
+import com.mongodb.connection.ServerVersion;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.binding.ConnectionSource;
+import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.Decoder;
+import org.bson.codecs.DocumentCodec;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+class CommandBatchCursorTest {
+
+    private static final MongoNamespace NAMESPACE = new MongoNamespace("test", "test");
+    private static final BsonInt64 CURSOR_ID = new BsonInt64(1);
+    private static final BsonDocument COMMAND_CURSOR_DOCUMENT = new BsonDocument("ok", new BsonInt32(1))
+            .append("cursor",
+                    new BsonDocument("ns", new BsonString(NAMESPACE.getFullName()))
+                            .append("id", CURSOR_ID)
+                            .append("firstBatch", new BsonArrayWrapper<>(new BsonArray())));
+
+    private static final Decoder<Document> DOCUMENT_CODEC = new DocumentCodec();
+
+
+    private Connection mockConnection;
+    private ConnectionDescription mockDescription;
+    private ConnectionSource connectionSource;
+    private OperationContext operationContext;
+    private TimeoutContext timeoutContext;
+    private ServerDescription serverDescription;
+
+    @BeforeEach
+    void setUp() {
+        ServerVersion serverVersion = new ServerVersion(3, 6);
+
+        mockConnection = mock(Connection.class, "connection");
+        mockDescription = mock(ConnectionDescription.class);
+        when(mockDescription.getMaxWireVersion()).thenReturn(getMaxWireVersionForServerVersion(serverVersion.getVersionList()));
+        when(mockDescription.getServerType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(mockConnection.getDescription()).thenReturn(mockDescription);
+        when(mockConnection.retain()).thenReturn(mockConnection);
+
+        connectionSource = mock(ConnectionSource.class);
+        operationContext = mock(OperationContext.class);
+        timeoutContext = mock(TimeoutContext.class);
+        serverDescription = mock(ServerDescription.class);
+        when(operationContext.getTimeoutContext()).thenReturn(timeoutContext);
+        when(connectionSource.getOperationContext()).thenReturn(operationContext);
+        when(connectionSource.getConnection()).thenReturn(mockConnection);
+        when(connectionSource.getServerDescription()).thenReturn(serverDescription);
+    }
+
+
+    @Test
+    void shouldSkipKillsCursorsCommandWhenNetworkErrorOccurs() {
+        //given
+        when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow(
+                new MongoSocketException("test", new ServerAddress()));
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+
+        CommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+        //when
+        Assertions.assertThrows(MongoSocketException.class, commandBatchCursor::next);
+
+        //then
+        commandBatchCursor.close();
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any());
+    }
+
+    private CommandBatchCursor<Document> createBatchCursor() {
+        return new CommandBatchCursor<>(
+                TimeoutMode.CURSOR_LIFETIME,
+                COMMAND_CURSOR_DOCUMENT,
+                0,
+                0,
+                DOCUMENT_CODEC,
+                null,
+                connectionSource,
+                mockConnection);
+    }
+
+    @Test
+    void shouldNotSkipKillsCursorsCommandWhenTimeoutExceptionDoesNotHaveNetworkErrorCause() {
+        //given
+        when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow(
+                new MongoOperationTimeoutException("test"));
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+
+        CommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        Assertions.assertThrows(MongoOperationTimeoutException.class, commandBatchCursor::next);
+
+        commandBatchCursor.close();
+
+
+        //then
+        verify(mockConnection, times(2)).command(any(),
+                any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any());
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any());
+    }
+
+    @Test
+    void shouldSkipKillsCursorsCommandWhenTimeoutExceptionHaveNetworkErrorCause() {
+        //given
+        when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow(
+                new MongoOperationTimeoutException("test", new MongoSocketException("test", new ServerAddress())));
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+
+        CommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        Assertions.assertThrows(MongoOperationTimeoutException.class, commandBatchCursor::next);
+        commandBatchCursor.close();
+
+        //then
+        verify(mockConnection, times(1)).command(any(),
+                any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any());
+        verify(mockConnection, never()).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any());
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy
index dc17329ae91..21ae1c4dfb9 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy
@@ -18,20 +18,25 @@ package com.mongodb.internal.operation
 import com.mongodb.MongoException
 import com.mongodb.MongoTimeoutException
+import com.mongodb.ReadConcern
 import com.mongodb.WriteConcern
 import com.mongodb.async.FutureResultCallback
 import com.mongodb.internal.binding.AsyncWriteBinding
 import com.mongodb.internal.binding.WriteBinding
 import com.mongodb.internal.session.SessionContext
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class CommitTransactionOperationUnitSpecification extends OperationUnitSpecification {
     def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException'() {
         given:
+        def sessionContext = Stub(SessionContext) {
+            getReadConcern() >> ReadConcern.DEFAULT
+            hasActiveTransaction() >> true
+        }
         def writeBinding = Stub(WriteBinding) {
             getWriteConnectionSource() >> { throw new MongoTimeoutException('Time out!') }
-            getSessionContext() >> Stub(SessionContext) {
-                hasActiveTransaction() >> true
-            }
+            getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext)
         }
         def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED)
@@ -45,13 +50,15 @@ class CommitTransactionOperationUnitSpecifica
     def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException asynchronously'() {
         given:
+        def sessionContext = Stub(SessionContext) {
+            getReadConcern() >> ReadConcern.DEFAULT
+            hasActiveTransaction() >> true
+        }
         def writeBinding = Stub(AsyncWriteBinding) {
             getWriteConnectionSource(_) >> { it[0].onResult(null, new MongoTimeoutException('Time out!')) }
-            getSessionContext() >> Stub(SessionContext) {
-                hasActiveTransaction() >> true
-            }
+            getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext)
         }
         def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED)
         def callback = new FutureResultCallback()
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java
index 15a8bd972f1..d631daf2e21 100644
--- 
a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java @@ -15,8 +15,10 @@ */ package com.mongodb.internal.operation; +import com.mongodb.ClusterFixture; import com.mongodb.MongoNamespace; import com.mongodb.ServerCursor; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.internal.connection.Connection; @@ -30,6 +32,8 @@ final class CursorResourceManagerTest { @Test void doubleCloseExecutedConcurrentlyWithOperationBeingInProgressShouldNotFail() { CursorResourceManager cursorResourceManager = new CursorResourceManager( + ClusterFixture.OPERATION_CONTEXT.getTimeoutContext(), + TimeoutMode.CURSOR_LIFETIME, new MongoNamespace("db", "coll"), MongoMockito.mock(AsyncConnectionSource.class, mock -> { when(mock.retain()).thenReturn(mock); diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy index b2bd9019ef5..021b392593c 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy @@ -28,7 +28,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import static com.mongodb.CursorType.TailableAwait -import static java.util.concurrent.TimeUnit.MILLISECONDS class FindOperationUnitSpecification extends OperationUnitSpecification { @@ -41,7 +40,8 @@ class FindOperationUnitSpecification extends OperationUnitSpecification { testOperation(operation, [3, 2, 0], expectedCommand, async, commandResult) // Overrides when: - operation.filter(new BsonDocument('a', BsonBoolean.TRUE)) + operation = new FindOperation(namespace, new BsonDocumentCodec()) + .filter(new BsonDocument('a', BsonBoolean.TRUE)) .projection(new BsonDocument('x', new BsonInt32(1))) .skip(2) .limit(limit) @@ -49,7 +49,7 @@ class FindOperationUnitSpecification extends OperationUnitSpecification { .cursorType(TailableAwait) .noCursorTimeout(true) .partial(true) - .maxTime(10, MILLISECONDS) + .comment(new BsonString('my comment')) .hint(BsonDocument.parse('{ hint : 1}')) .min(BsonDocument.parse('{ abc: 99 }')) @@ -68,7 +68,6 @@ class FindOperationUnitSpecification extends OperationUnitSpecification { .append('awaitData', BsonBoolean.TRUE) .append('allowPartialResults', BsonBoolean.TRUE) .append('noCursorTimeout', BsonBoolean.TRUE) - .append('maxTimeMS', new BsonInt64(operation.getMaxTime(MILLISECONDS))) .append('comment', operation.getComment()) .append('hint', operation.getHint()) .append('min', operation.getMin()) diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java index 4a4654b38a1..12a964db625 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java @@ -27,7 +27,6 @@ import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.connection.OperationContext; import 
com.mongodb.lang.Nullable; import org.bson.BsonBoolean; import org.bson.BsonDocument; @@ -40,10 +39,10 @@ import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.mockito.MongoMockito.mock; import static java.util.Collections.emptyList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.ArgumentCaptor.forClass; @@ -68,13 +67,11 @@ void executedCommandIsCorrect() { boolean nameOnly = true; boolean authorizedCollections = true; int batchSize = 123; - long maxTime = 1234; BsonValue comment = new BsonString("comment"); operation.filter(filter) .nameOnly(nameOnly) .authorizedCollections(authorizedCollections) .batchSize(batchSize) - .maxTime(maxTime, MILLISECONDS) .comment(comment); assertEquals( new BsonDocument() @@ -85,7 +82,6 @@ void executedCommandIsCorrect() { .append("cursor", new BsonDocument() .append("batchSize", new BsonInt32(batchSize)) ) - .append("maxTimeMS", new BsonInt64(maxTime)) .append("comment", comment), executeOperationAndCaptureCommand() ); @@ -112,9 +108,9 @@ private BsonDocument executeOperationAndCaptureCommand() { private static Mocks mocks(final MongoNamespace namespace) { Mocks result = new Mocks(); result.readBinding(mock(ReadBinding.class, bindingMock -> { - OperationContext operationContext = new OperationContext(); - when(bindingMock.getOperationContext()).thenReturn(operationContext); + when(bindingMock.getOperationContext()).thenReturn(OPERATION_CONTEXT); ConnectionSource connectionSource = mock(ConnectionSource.class, connectionSourceMock -> { + when(connectionSourceMock.getOperationContext()).thenReturn(OPERATION_CONTEXT); when(connectionSourceMock.release()).thenReturn(1); ServerAddress serverAddress = new ServerAddress(); result.connection(mock(Connection.class, connectionMock -> { diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy index ff664a594ea..fd9786e8dbf 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy @@ -32,6 +32,7 @@ import org.bson.BsonArray import org.bson.BsonDocument import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.WriteConcern.UNACKNOWLEDGED import static com.mongodb.connection.ServerConnectionState.CONNECTED @@ -107,8 +108,8 @@ class OperationHelperSpecification extends Specification { } expect: - canRetryRead(retryableServerDescription, noTransactionSessionContext) - !canRetryRead(retryableServerDescription, activeTransactionSessionContext) + canRetryRead(retryableServerDescription, OPERATION_CONTEXT.withSessionContext(noTransactionSessionContext)) + !canRetryRead(retryableServerDescription, OPERATION_CONTEXT.withSessionContext(activeTransactionSessionContext)) } diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy index 01ad72455fb..11710eff7df 100644 --- 
a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy @@ -41,6 +41,8 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + class OperationUnitSpecification extends Specification { // Have to add to this map for every server release @@ -95,6 +97,12 @@ class OperationUnitSpecification extends Specification { def testSyncOperation(operation, List serverVersion, result, Boolean checkCommand=true, BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) { + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + def connection = Mock(Connection) { _ * getDescription() >> Stub(ConnectionDescription) { getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) @@ -104,20 +112,16 @@ class OperationUnitSpecification extends Specification { def connectionSource = Stub(ConnectionSource) { getConnection() >> connection getReadPreference() >> readPreference - getServerApi() >> null + getOperationContext() >> operationContext } def readBinding = Stub(ReadBinding) { getReadConnectionSource() >> connectionSource getReadPreference() >> readPreference - getServerApi() >> null - getSessionContext() >> Stub(SessionContext) { - hasActiveTransaction() >> false - getReadConcern() >> ReadConcern.DEFAULT - } + getOperationContext() >> operationContext } def writeBinding = Stub(WriteBinding) { - getServerApi() >> null getWriteConnectionSource() >> connectionSource + getOperationContext() >> operationContext } if (checkCommand) { @@ -149,6 +153,13 @@ class OperationUnitSpecification extends Specification { def testAsyncOperation(operation, List serverVersion, result = null, Boolean checkCommand=true, BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) { + + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + def connection = Mock(AsyncConnection) { _ * getDescription() >> Stub(ConnectionDescription) { getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) @@ -156,22 +167,18 @@ class OperationUnitSpecification extends Specification { } def connectionSource = Stub(AsyncConnectionSource) { - getServerApi() >> null - getReadPreference() >> readPreference getConnection(_) >> { it[0].onResult(connection, null) } + getReadPreference() >> readPreference + getOperationContext() >> getOperationContext() >> operationContext } def readBinding = Stub(AsyncReadBinding) { - getServerApi() >> null getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } getReadPreference() >> readPreference - getSessionContext() >> Stub(SessionContext) { - hasActiveTransaction() >> false - getReadConcern() >> ReadConcern.DEFAULT - } + getOperationContext() >> operationContext } def writeBinding = Stub(AsyncWriteBinding) { - getServerApi() >> null getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getOperationContext() >> operationContext } def callback = new FutureResultCallback() diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy 
b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy index a18148911bf..ab6b6e252ab 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation - import com.mongodb.MongoWriteConcernException import com.mongodb.ReadConcern import com.mongodb.ReadPreference @@ -35,6 +34,7 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.Decoder import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ReadPreference.primary import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer @@ -53,12 +53,12 @@ class SyncOperationHelperSpecification extends Specification { def connection = Mock(Connection) def function = Stub(CommandWriteTransformer) def connectionSource = Stub(ConnectionSource) { - getServerApi() >> null getConnection() >> connection + getOperationContext() >> OPERATION_CONTEXT } def writeBinding = Stub(WriteBinding) { - getServerApi() >> null getWriteConnectionSource() >> connectionSource + getOperationContext() >> OPERATION_CONTEXT } def connectionDescription = Stub(ConnectionDescription) @@ -67,15 +67,21 @@ class SyncOperationHelperSpecification extends Specification { then: _ * connection.getDescription() >> connectionDescription - 1 * connection.command(dbName, command, _, primary(), decoder, writeBinding) >> new BsonDocument() + 1 * connection.command(dbName, command, _, primary(), decoder, OPERATION_CONTEXT) >> new BsonDocument() 1 * connection.release() } def 'should retry with retryable exception'() { given: + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) def dbName = 'db' def command = BsonDocument.parse('''{findAndModify: "coll", query: {a: 1}, new: false, update: {$inc: {a :1}}, txnNumber: 1}''') - def commandCreator = { serverDescription, connectionDescription -> command } + def commandCreator = { csot, serverDescription, connectionDescription -> command } def decoder = new BsonDocumentCodec() def results = [ BsonDocument.parse('{ok: 1.0, writeConcernError: {code: 91, errmsg: "Replication is being shut down"}}'), @@ -92,23 +98,20 @@ class SyncOperationHelperSpecification extends Specification { _ * getServerDescription() >> Stub(ServerDescription) { getLogicalSessionTimeoutMinutes() >> 1 } + getOperationContext() >> operationContext } def writeBinding = Stub(WriteBinding) { getWriteConnectionSource() >> connectionSource - getServerApi() >> null - getSessionContext() >> Stub(SessionContext) { - hasSession() >> true - hasActiveTransaction() >> false - getReadConcern() >> ReadConcern.DEFAULT - } + getOperationContext() >> operationContext } when: - executeRetryableWrite(writeBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, commandCreator, - FindAndModifyHelper.transformer()) { cmd -> cmd } + executeRetryableWrite(writeBinding, dbName, primary(), + new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.transformer()) + { cmd -> cmd } then: - 2 * connection.command(dbName, command, _, primary(), decoder, writeBinding) >> { results.poll() } + 2 * 
connection.command(dbName, command, _, primary(), decoder, operationContext) >> { results.poll() } then: def ex = thrown(MongoWriteConcernException) @@ -119,17 +122,18 @@ class SyncOperationHelperSpecification extends Specification { given: def dbName = 'db' def command = new BsonDocument('fakeCommandName', BsonNull.VALUE) - def commandCreator = { serverDescription, connectionDescription -> command } + def commandCreator = { csot, serverDescription, connectionDescription -> command } def decoder = Stub(Decoder) def function = Stub(CommandReadTransformer) def connection = Mock(Connection) def connectionSource = Stub(ConnectionSource) { getConnection() >> connection getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT } def readBinding = Stub(ReadBinding) { getReadConnectionSource() >> connectionSource - getServerApi() >> null + getOperationContext() >> OPERATION_CONTEXT } def connectionDescription = Stub(ConnectionDescription) @@ -138,7 +142,7 @@ class SyncOperationHelperSpecification extends Specification { then: _ * connection.getDescription() >> connectionDescription - 1 * connection.command(dbName, command, _, readPreference, decoder, readBinding) >> new BsonDocument() + 1 * connection.command(dbName, command, _, readPreference, decoder, OPERATION_CONTEXT) >> new BsonDocument() 1 * connection.release() where: diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java new file mode 100644 index 00000000000..2c7b71949c8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.operation;
+
+import com.mongodb.WriteConcern;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class WriteConcernHelperTest {
+
+    static WriteConcern[] shouldRemoveWtimeout(){
+        return new WriteConcern[]{
+                WriteConcern.ACKNOWLEDGED,
+                WriteConcern.MAJORITY,
+                WriteConcern.W1,
+                WriteConcern.W2,
+                WriteConcern.W3,
+                WriteConcern.UNACKNOWLEDGED,
+                WriteConcern.JOURNALED,
+
+                WriteConcern.ACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.MAJORITY.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.W1.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.W2.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.W3.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.UNACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.JOURNALED.withWTimeout(100, TimeUnit.MILLISECONDS),
+        };
+    }
+
+    @MethodSource
+    @ParameterizedTest
+    void shouldRemoveWtimeout(final WriteConcern writeConcern){
+        //when
+        WriteConcern clonedWithoutTimeout = WriteConcernHelper.cloneWithoutTimeout(writeConcern);
+
+        //then
+        assertEquals(writeConcern.getWObject(), clonedWithoutTimeout.getWObject());
+        assertEquals(writeConcern.getJournal(), clonedWithoutTimeout.getJournal());
+        assertNull(clonedWithoutTimeout.getWTimeout(TimeUnit.MILLISECONDS));
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java b/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java
index 6de3150ad36..c7fc1d73e20 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java
@@ -20,6 +20,7 @@ import com.mongodb.session.ClientSession;
 import org.junit.jupiter.api.Test;
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getCluster;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -27,7 +28,7 @@ class BaseClientSessionImplTest {
     @Test
     void shouldNotCheckoutServerSessionIfNeverRequested() {
-        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), null);
+        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), OPERATION_CONTEXT);
         ClientSession clientSession = new BaseClientSessionImpl(serverSessionPool, new Object(), ClientSessionOptions.builder().build());
         assertEquals(0, serverSessionPool.getInUseCount());
@@ -39,7 +40,7 @@ void shouldNotCheckoutServerSessionIfNeverRequested() {
     @Test
     void shouldDelayServerSessionCheckoutUntilRequested() {
-        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), null);
+        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), OPERATION_CONTEXT);
         ClientSession clientSession = new BaseClientSessionImpl(serverSessionPool, new Object(), ClientSessionOptions.builder().build());
         assertEquals(0, serverSessionPool.getInUseCount());
diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy
index a1452d4f7a5..19bfa994200 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy
+++ 
b/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy @@ -32,6 +32,8 @@ import org.bson.BsonDocument import org.bson.codecs.BsonDocumentCodec import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getServerApi import static com.mongodb.ReadPreference.primaryPreferred import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE @@ -69,7 +71,7 @@ class ServerSessionPoolSpecification extends Specification { def cluster = Stub(Cluster) { getCurrentDescription() >> connectedDescription } - def pool = new ServerSessionPool(cluster, getServerApi()) + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) when: def session = pool.get() @@ -83,7 +85,7 @@ class ServerSessionPoolSpecification extends Specification { def cluster = Stub(Cluster) { getCurrentDescription() >> connectedDescription } - def pool = new ServerSessionPool(cluster, getServerApi()) + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) pool.close() when: @@ -98,7 +100,7 @@ class ServerSessionPoolSpecification extends Specification { def cluster = Stub(Cluster) { getCurrentDescription() >> connectedDescription } - def pool = new ServerSessionPool(cluster, getServerApi()) + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) def session = pool.get() when: @@ -118,7 +120,7 @@ class ServerSessionPoolSpecification extends Specification { millis() >>> [0, MINUTES.toMillis(29) + 1, ] } - def pool = new ServerSessionPool(cluster, getServerApi(), clock) + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) def sessionOne = pool.get() when: @@ -144,7 +146,7 @@ class ServerSessionPoolSpecification extends Specification { def clock = Stub(ServerSessionPool.Clock) { millis() >>> [0, 0, 0] } - def pool = new ServerSessionPool(cluster, getServerApi(), clock) + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) def session = pool.get() when: @@ -163,7 +165,7 @@ class ServerSessionPoolSpecification extends Specification { def clock = Stub(ServerSessionPool.Clock) { millis() >> 42 } - def pool = new ServerSessionPool(cluster, getServerApi(), clock) + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) when: def session = pool.get() as ServerSessionPool.ServerSessionImpl @@ -185,7 +187,7 @@ class ServerSessionPoolSpecification extends Specification { def clock = Stub(ServerSessionPool.Clock) { millis() >> 42 } - def pool = new ServerSessionPool(cluster, getServerApi(), clock) + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) when: def session = pool.get() as ServerSessionPool.ServerSessionImpl @@ -205,7 +207,7 @@ class ServerSessionPoolSpecification extends Specification { def cluster = Mock(Cluster) { getCurrentDescription() >> connectedDescription } - def pool = new ServerSessionPool(cluster, getServerApi()) + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) def sessions = [] 10.times { sessions.add(pool.get()) } diff --git a/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java b/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java index 4f331d208a2..a1b3f37dd98 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java @@ -15,23 +15,118 @@ */ package 
com.mongodb.internal.time; +import com.mongodb.lang.Nullable; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; import java.util.Collection; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Condition; import java.util.stream.Stream; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.junit.jupiter.api.Assertions.assertAll; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public final class TimePointTest { + + private final AtomicLong currentNanos = new AtomicLong(); + private final TimePoint mockTimePoint = new TimePoint(0L) { + @Override + long currentNanos() { + return currentNanos.get(); + } + }; + + public static boolean isInfinite(final Timeout timeout) { + return timeout.call(NANOSECONDS, () -> true, (ns) -> false, () -> false); + } + + public static boolean hasExpired(final Timeout timeout) { + return timeout.call(NANOSECONDS, () -> false, (ns) -> false, () -> true); + } + + public static long remaining(final Timeout timeout, final TimeUnit unit) { + return timeout.checkedCall(unit, + () -> { + throw new AssertionError("Infinite TimePoints have infinite remaining time"); + }, + (time) -> time, + () -> 0L); + } + + // Timeout + + @Test + void timeoutExpiresIn() { + assertAll( + () -> assertThrows(AssertionError.class, () -> Timeout.expiresIn(-1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)), + () -> assertTrue(hasExpired(Timeout.expiresIn(0L, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED))), + () -> assertFalse(isInfinite(Timeout.expiresIn(1L, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED))), + () -> assertFalse(hasExpired(Timeout.expiresIn(1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)))); + } + + @Test + void timeoutInfinite() { + assertEquals(Timeout.infinite(), TimePoint.infinite()); + } + + @Test + void timeoutAwaitOnCondition() throws InterruptedException { + Condition condition = mock(Condition.class); + + Timeout.infinite().awaitOn(condition, () -> "ignored"); + verify(condition, times(1)).await(); + verifyNoMoreInteractions(condition); + + reset(condition); + + Timeout.expiresIn(100, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED).awaitOn(condition, () -> "ignored"); + verify(condition, times(1)).awaitNanos(anyLong()); + verifyNoMoreInteractions(condition); + } + + 
@Test + void timeoutAwaitOnLatch() throws InterruptedException { + CountDownLatch latch = mock(CountDownLatch.class); + + Timeout.infinite().awaitOn(latch, () -> "ignored"); + verify(latch, times(1)).await(); + verifyNoMoreInteractions(latch); + + reset(latch); + + Timeout.expiresIn(100, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED).awaitOn(latch, () -> "ignored"); + verify(latch, times(1)).await(anyLong(), any(TimeUnit.class)); + verifyNoMoreInteractions(latch); + } + + // TimePoint -final class TimePointTest { @Test void now() { TimePoint timePointLowerBound = TimePoint.at(System.nanoTime()); @@ -41,6 +136,65 @@ void now() { assertTrue(timePoint.compareTo(timePointUpperBound) <= 0, "the point is too late"); } + @Test + void infinite() { + TimePoint infinite = TimePoint.infinite(); + TimePoint now = TimePoint.now(); + assertEquals(0, infinite.compareTo(TimePoint.infinite())); + assertTrue(infinite.compareTo(now) > 0); + assertTrue(now.compareTo(infinite) < 0); + } + + @Test + void isInfinite() { + assertAll( + () -> assertTrue(isInfinite(Timeout.infinite())), + () -> assertFalse(isInfinite(TimePoint.now()))); + } + + @Test + void asTimeout() { + TimePoint t1 = TimePoint.now(); + assertSame(t1, t1.asTimeout()); + TimePoint t2 = TimePoint.infinite(); + assertSame(t2, t2.asTimeout()); + } + + + @Test + void remaining() { + assertAll( + () -> assertThrows(AssertionError.class, () -> remaining(TimePoint.infinite(), NANOSECONDS)), + () -> assertEquals(0, remaining(TimePoint.now(), NANOSECONDS)) + ); + Timeout earlier = TimePoint.at(System.nanoTime() - 100); + assertEquals(0, remaining(earlier, NANOSECONDS)); + assertTrue(hasExpired(earlier)); + + currentNanos.set(-100); + assertEquals(100, remaining(mockTimePoint, NANOSECONDS)); + currentNanos.set(-1000000); + assertEquals(1, remaining(mockTimePoint, MILLISECONDS)); + currentNanos.set(-1000000 + 1); + assertEquals(0, remaining(mockTimePoint, MILLISECONDS)); + } + + @ParameterizedTest + @ValueSource(longs = {1, 7, 10, 100, 1000}) + void remaining(final long durationNanos) { + TimePoint start = TimePoint.now(); + Timeout timeout = start.timeoutAfterOrInfiniteIfNegative(durationNanos, NANOSECONDS); + while (!hasExpired(timeout)) { + long remainingNanosUpperBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos()); + long remainingNanos = remaining(timeout, NANOSECONDS); + long remainingNanosLowerBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos()); + assertTrue(remainingNanos >= remainingNanosLowerBound, "remaining nanos is too low"); + assertTrue(remainingNanos <= remainingNanosUpperBound, "remaining nanos is too high"); + Thread.yield(); + } + assertTrue(TimePoint.now().durationSince(start).toNanos() >= durationNanos, "expired too early"); + } + @Test void elapsed() { TimePoint timePoint = TimePoint.now(); @@ -49,25 +203,88 @@ void elapsed() { Duration elapsedUpperBound = TimePoint.now().durationSince(timePoint); assertTrue(elapsed.compareTo(elapsedLowerBound) >= 0, "the elapsed is too low"); assertTrue(elapsed.compareTo(elapsedUpperBound) <= 0, "the elapsed is too high"); + assertThrows(AssertionError.class, () -> TimePoint.infinite().elapsed()); + + currentNanos.set(100); + assertEquals(100, mockTimePoint.elapsed().toNanos()); + currentNanos.set(1000000); + assertEquals(1, mockTimePoint.elapsed().toMillis()); + currentNanos.set(1000000 - 1); + assertEquals(0, mockTimePoint.elapsed().toMillis()); + } + + @Test + void hasExpired() { + assertAll( + () -> 
assertFalse(hasExpired(Timeout.infinite())), + () -> assertTrue(hasExpired(TimePoint.now())), + () -> assertThrows(AssertionError.class, () -> Timeout.expiresIn(-1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)), + () -> assertFalse(hasExpired(Timeout.expiresIn(1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)))); } @ParameterizedTest @MethodSource("earlierNanosAndNanosArguments") - void durationSince(final long earlierNanos, final long nanos) { - Duration expectedDuration = Duration.ofNanos(nanos - earlierNanos); + void durationSince(final Long earlierNanos, @Nullable final Long nanos) { TimePoint earlierTimePoint = TimePoint.at(earlierNanos); TimePoint timePoint = TimePoint.at(nanos); + + if (nanos == null) { + assertThrows(AssertionError.class, () -> timePoint.durationSince(earlierTimePoint)); + return; + } + + Duration expectedDuration = Duration.ofNanos(nanos - earlierNanos); assertFalse(expectedDuration.isNegative()); assertEquals(expectedDuration, timePoint.durationSince(earlierTimePoint)); assertEquals(expectedDuration.negated(), earlierTimePoint.durationSince(timePoint)); } + @ParameterizedTest + @ValueSource(longs = {1, 7, Long.MAX_VALUE / 2, Long.MAX_VALUE - 1}) + void remainingNanos(final long durationNanos) { + TimePoint start = TimePoint.now(); + TimePoint timeout = start.add(Duration.ofNanos(durationNanos)); + assertEquals(durationNanos, timeout.durationSince(start).toNanos()); + assertEquals(Math.max(0, durationNanos - 1), timeout.durationSince(start.add(Duration.ofNanos(1))).toNanos()); + assertEquals(0, timeout.durationSince(start.add(Duration.ofNanos(durationNanos))).toNanos()); + assertEquals(-1, timeout.durationSince(start.add(Duration.ofNanos(durationNanos + 1))).toNanos()); + } + + @Test + void fromNowOrInfinite() { + TimePoint timePoint = TimePoint.now(); + assertAll( + () -> assertFalse(isInfinite(TimePoint.now().timeoutAfterOrInfiniteIfNegative(1L, NANOSECONDS))), + () -> assertEquals(timePoint, timePoint.timeoutAfterOrInfiniteIfNegative(0, NANOSECONDS)), + () -> assertNotEquals(TimePoint.infinite(), timePoint.timeoutAfterOrInfiniteIfNegative(1, NANOSECONDS)), + () -> assertNotEquals(timePoint, timePoint.timeoutAfterOrInfiniteIfNegative(1, NANOSECONDS)), + () -> assertNotEquals(TimePoint.infinite(), timePoint.timeoutAfterOrInfiniteIfNegative(Long.MAX_VALUE - 1, NANOSECONDS))); + } + + @ParameterizedTest + @MethodSource("nanosAndDurationsArguments") + void add(final long nanos, final Duration duration) { + TimePoint timePoint = TimePoint.at(nanos); + assertEquals(duration, timePoint.add(duration).durationSince(timePoint)); + } + + private static Stream nanosAndDurationsArguments() { + Collection nanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE); + Collection durationsInNanos = asList( + // Using `-Long.MAX_VALUE` results in `ArithmeticException` in OpenJDK JDK 8 because of https://bugs.openjdk.org/browse/JDK-8146747. + // This was fixed in OpenJDK JDK 9. 
+                -Long.MAX_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        return nanos.stream()
+                .flatMap(nano -> durationsInNanos.stream()
+                        .map(durationNanos -> arguments(nano, Duration.ofNanos(durationNanos))));
+    }
+
     @ParameterizedTest
     @MethodSource("earlierNanosAndNanosArguments")
-    void compareTo(final long earlierNanos, final long nanos) {
+    void compareTo(final Long earlierNanos, final Long nanos) {
         TimePoint earlierTimePoint = TimePoint.at(earlierNanos);
         TimePoint timePoint = TimePoint.at(nanos);
-        if (earlierNanos == nanos) {
+        if (Objects.equals(earlierNanos, nanos)) {
             assertEquals(0, earlierTimePoint.compareTo(timePoint));
             assertEquals(0, timePoint.compareTo(earlierTimePoint));
             assertEquals(earlierTimePoint, timePoint);
@@ -82,28 +299,30 @@ void compareTo(final long earlierNanos, final long nanos) {
     private static Stream<Arguments> earlierNanosAndNanosArguments() {
         Collection<Long> earlierNanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
-        Collection<Long> durationsInNanos = asList(0L, 1L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        Collection<Long> durationsInNanos = asList(0L, 1L, Long.MAX_VALUE / 2, Long.MAX_VALUE, null);
         return earlierNanos.stream()
                 .flatMap(earlier -> durationsInNanos.stream()
-                        .map(durationNanos -> arguments(earlier, earlier + durationNanos)));
+                        .map(durationNanos -> arguments(earlier, durationNanos == null ? null : earlier + durationNanos)));
     }
     @ParameterizedTest
-    @MethodSource("nanosAndDurationsArguments")
-    void add(final long nanos, final Duration duration) {
-        TimePoint timePoint = TimePoint.at(nanos);
-        assertEquals(duration, timePoint.add(duration).durationSince(timePoint));
+    @MethodSource("durationArguments")
+    void convertsUnits(final long duration, final TimeUnit unit) {
+        TimePoint start = TimePoint.now();
+        TimePoint end = start.timeoutAfterOrInfiniteIfNegative(duration, unit);
+        if (duration < 0) {
+            assertTrue(isInfinite(end));
+        } else {
+            assertEquals(unit.toNanos(duration), end.durationSince(start).toNanos());
+        }
     }
-    private static Stream<Arguments> nanosAndDurationsArguments() {
-        Collection<Long> nanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
-        Collection<Long> durationsInNanos = asList(
-                // Using `-Long.MAX_VALUE` results in `ArithmeticException` in OpenJDK JDK 8 because of https://bugs.openjdk.org/browse/JDK-8146747.
-                // This was fixed in OpenJDK JDK 9.
-                -Long.MAX_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
-        return nanos.stream()
-                .flatMap(nano -> durationsInNanos.stream()
-                        .map(durationNanos -> arguments(nano, Duration.ofNanos(durationNanos))));
+    private static Stream<Arguments> durationArguments() {
+        return Stream.of(TimeUnit.values())
+                .flatMap(unit -> Stream.of(
+                        Arguments.of(-7, unit),
+                        Arguments.of(0, unit),
+                        Arguments.of(7, unit)));
     }
     private TimePointTest() {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/time/TimeoutTest.java b/driver-core/src/test/unit/com/mongodb/internal/time/TimeoutTest.java
deleted file mode 100644
index 03df92771ac..00000000000
--- a/driver-core/src/test/unit/com/mongodb/internal/time/TimeoutTest.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2008-present MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.internal.time; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; - -import java.time.Duration; -import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; - -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static org.junit.jupiter.api.Assertions.assertAll; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -final class TimeoutTest { - @Test - void isInfinite() { - assertAll( - () -> assertTrue(Timeout.infinite().isInfinite()), - () -> assertFalse(Timeout.immediate().isInfinite()), - () -> assertFalse(Timeout.startNow(1).isInfinite()), - () -> assertFalse(Timeout.started(1, TimePoint.now()).isInfinite())); - } - - @Test - void isImmediate() { - assertAll( - () -> assertTrue(Timeout.immediate().isImmediate()), - () -> assertFalse(Timeout.infinite().isImmediate()), - () -> assertFalse(Timeout.startNow(1).isImmediate()), - () -> assertFalse(Timeout.started(1, TimePoint.now()).isImmediate())); - } - - @Test - void started() { - TimePoint timePoint = TimePoint.now(); - assertAll( - () -> assertEquals(Timeout.infinite(), Timeout.started(-1, timePoint)), - () -> assertEquals(Timeout.immediate(), Timeout.started(0, timePoint)), - () -> assertNotEquals(Timeout.infinite(), Timeout.started(1, timePoint)), - () -> assertNotEquals(Timeout.immediate(), Timeout.started(1, timePoint)), - () -> assertEquals(1, Timeout.started(1, timePoint).durationNanos()), - () -> assertEquals(timePoint, Timeout.started(1, timePoint).start()), - () -> assertNotEquals(Timeout.infinite(), Timeout.started(Long.MAX_VALUE - 1, timePoint)), - () -> assertEquals(Long.MAX_VALUE - 1, Timeout.started(Long.MAX_VALUE - 1, timePoint).durationNanos()), - () -> assertEquals(timePoint, Timeout.started(Long.MAX_VALUE - 1, timePoint).start()), - () -> assertEquals(Timeout.infinite(), Timeout.started(Long.MAX_VALUE, timePoint))); - } - - @Test - void startNow() { - assertAll( - () -> assertEquals(Timeout.infinite(), Timeout.startNow(-1)), - () -> assertEquals(Timeout.immediate(), Timeout.startNow(0)), - () -> assertNotEquals(Timeout.infinite(), Timeout.startNow(1)), - () -> assertNotEquals(Timeout.immediate(), Timeout.startNow(1)), - () -> assertEquals(1, Timeout.startNow(1).durationNanos()), - () -> assertNotEquals(Timeout.infinite(), Timeout.startNow(Long.MAX_VALUE - 1)), - () -> assertEquals(Long.MAX_VALUE - 1, Timeout.startNow(Long.MAX_VALUE - 1).durationNanos()), - () -> assertEquals(Timeout.infinite(), Timeout.startNow(Long.MAX_VALUE))); - } - - @ParameterizedTest - @MethodSource("durationArguments") - void startedConvertsUnits(final long duration, final TimeUnit unit) { - TimePoint timePoint = 
TimePoint.now(); - if (duration < 0) { - assertTrue(Timeout.started(duration, unit, timePoint).isInfinite()); - } else if (duration == 0) { - assertTrue(Timeout.started(duration, unit, timePoint).isImmediate()); - } else { - assertEquals(unit.toNanos(duration), Timeout.started(duration, unit, timePoint).durationNanos()); - } - } - - @ParameterizedTest - @MethodSource("durationArguments") - void startNowConvertsUnits(final long duration, final TimeUnit unit) { - if (duration < 0) { - assertTrue(Timeout.startNow(duration, unit).isInfinite()); - } else if (duration == 0) { - assertTrue(Timeout.startNow(duration, unit).isImmediate()); - } else { - assertEquals(unit.toNanos(duration), Timeout.startNow(duration, unit).durationNanos()); - } - } - - private static Stream durationArguments() { - return Stream.of(TimeUnit.values()) - .flatMap(unit -> Stream.of( - Arguments.of(-7, unit), - Arguments.of(0, unit), - Arguments.of(7, unit))); - } - - @Test - void remainingTrivialCases() { - assertAll( - () -> assertThrows(AssertionError.class, () -> Timeout.infinite().remaining(NANOSECONDS)), - () -> assertTrue(Timeout.infinite().remainingOrInfinite(NANOSECONDS) < 0), - () -> assertEquals(0, Timeout.immediate().remaining(NANOSECONDS)), - () -> assertEquals(0, Timeout.immediate().remainingOrInfinite(NANOSECONDS))); - } - - @ParameterizedTest - @ValueSource(longs = {1, 7, Long.MAX_VALUE / 2, Long.MAX_VALUE - 1}) - void remainingNanos(final long durationNanos) { - TimePoint start = TimePoint.now(); - Timeout timeout = Timeout.started(durationNanos, start); - assertEquals(durationNanos, timeout.remainingNanos(start)); - assertEquals(Math.max(0, durationNanos - 1), timeout.remainingNanos(start.add(Duration.ofNanos(1)))); - assertEquals(0, timeout.remainingNanos(start.add(Duration.ofNanos(durationNanos)))); - assertEquals(0, timeout.remainingNanos(start.add(Duration.ofNanos(durationNanos + 1)))); - } - - @Test - void expired() { - assertAll( - () -> assertFalse(Timeout.infinite().expired()), - () -> assertTrue(Timeout.immediate().expired()), - () -> assertTrue(Timeout.expired(0)), - () -> assertFalse(Timeout.expired(Long.MIN_VALUE)), - () -> assertFalse(Timeout.expired(-1)), - () -> assertFalse(Timeout.expired(1)), - () -> assertFalse(Timeout.expired(Long.MAX_VALUE))); - } - - @Test - void convertRoundUp() { - assertAll( - () -> assertEquals(1, Timeout.convertRoundUp(1, NANOSECONDS)), - () -> assertEquals(0, Timeout.convertRoundUp(0, TimeUnit.MILLISECONDS)), - () -> assertEquals(1, Timeout.convertRoundUp(1, TimeUnit.MILLISECONDS)), - () -> assertEquals(1, Timeout.convertRoundUp(999_999, TimeUnit.MILLISECONDS)), - () -> assertEquals(1, Timeout.convertRoundUp(1_000_000, TimeUnit.MILLISECONDS)), - () -> assertEquals(2, Timeout.convertRoundUp(1_000_001, TimeUnit.MILLISECONDS)), - () -> assertEquals(1, Timeout.convertRoundUp(1, TimeUnit.DAYS))); - } - - @ParameterizedTest - @ValueSource(longs = {1, 7, 10, 100, 1000}) - void remaining(final long durationNanos) { - TimePoint start = TimePoint.now(); - Timeout timeout = Timeout.started(durationNanos, start); - while (!timeout.expired()) { - long remainingNanosUpperBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos()); - long remainingNanos = timeout.remaining(NANOSECONDS); - long remainingNanosLowerBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos()); - assertTrue(remainingNanos >= remainingNanosLowerBound, "remaining nanos is too low"); - assertTrue(remainingNanos <= remainingNanosUpperBound, "remaining 
nanos is too high"); - Thread.yield(); - } - assertTrue(TimePoint.now().durationSince(start).toNanos() >= durationNanos, "expired too early"); - } - - private TimeoutTest() { - } -} diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt index e4c3a3eb31a..439a0ccbb29 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.ExplainVerbosity import com.mongodb.client.AggregateIterable as JAggregateIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.coroutine.AggregateFlow import java.util.concurrent.TimeUnit @@ -28,7 +29,6 @@ import org.bson.conversions.Bson data class SyncAggregateIterable(val wrapped: AggregateFlow) : JAggregateIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncAggregateIterable = apply { wrapped.batchSize(batchSize) } - override fun toCollection() = runBlocking { wrapped.toCollection() } override fun allowDiskUse(allowDiskUse: Boolean?): SyncAggregateIterable = apply { @@ -59,6 +59,10 @@ data class SyncAggregateIterable(val wrapped: AggregateFlow) : override fun let(variables: Bson?): SyncAggregateIterable = apply { wrapped.let(variables) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncAggregateIterable = apply { + wrapped.timeoutMode(timeoutMode) + } + override fun explain(): Document = runBlocking { wrapped.explain() } override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt index c29f227d5d6..83ba91df16b 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt @@ -20,6 +20,7 @@ import com.mongodb.ServerAddress import com.mongodb.TransactionOptions import com.mongodb.client.ClientSession as JClientSession import com.mongodb.client.TransactionBody +import com.mongodb.internal.TimeoutContext import com.mongodb.kotlin.client.coroutine.ClientSession import com.mongodb.session.ServerSession import kotlinx.coroutines.runBlocking @@ -86,4 +87,6 @@ class SyncClientSession(internal val wrapped: ClientSession, private val origina override fun withTransaction(transactionBody: TransactionBody, options: TransactionOptions): T = throw UnsupportedOperationException() + + override fun getTimeoutContext(): TimeoutContext? 
= wrapped.getTimeoutContext() } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt index 4f412c253a0..0fdc879d610 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.client.DistinctIterable as JDistinctIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.coroutine.DistinctFlow import java.util.concurrent.TimeUnit @@ -32,4 +33,7 @@ data class SyncDistinctIterable(val wrapped: DistinctFlow) : override fun collation(collation: Collation?): SyncDistinctIterable = apply { wrapped.collation(collation) } override fun comment(comment: String?): SyncDistinctIterable = apply { wrapped.comment(comment) } override fun comment(comment: BsonValue?): SyncDistinctIterable = apply { wrapped.comment(comment) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncDistinctIterable = apply { + wrapped.timeoutMode(timeoutMode) + } } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt index b9e3a6665d6..6c500a9cf90 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt @@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.CursorType import com.mongodb.ExplainVerbosity import com.mongodb.client.FindIterable as JFindIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.coroutine.FindFlow import java.util.concurrent.TimeUnit @@ -76,6 +77,7 @@ data class SyncFindIterable(val wrapped: FindFlow) : JFindIterable = apply { wrapped.returnKey(returnKey) } override fun showRecordId(showRecordId: Boolean): SyncFindIterable = apply { wrapped.showRecordId(showRecordId) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncFindIterable = apply { wrapped.timeoutMode(timeoutMode) } override fun explain(): Document = runBlocking { wrapped.explain() } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt index 4193e0f04f8..ab1853c756d 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable +import com.mongodb.client.cursor.TimeoutMode import 
com.mongodb.kotlin.client.coroutine.ListCollectionsFlow import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -25,7 +26,6 @@ data class SyncListCollectionsIterable(val wrapped: ListCollectionsFlow JListCollectionsIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncListCollectionsIterable = apply { wrapped.batchSize(batchSize) } - override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionsIterable = apply { wrapped.maxTime(maxTime, timeUnit) } @@ -33,4 +33,7 @@ data class SyncListCollectionsIterable(val wrapped: ListCollectionsFlow override fun filter(filter: Bson?): SyncListCollectionsIterable = apply { wrapped.filter(filter) } override fun comment(comment: String?): SyncListCollectionsIterable = apply { wrapped.comment(comment) } override fun comment(comment: BsonValue?): SyncListCollectionsIterable = apply { wrapped.comment(comment) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListCollectionsIterable = apply { + wrapped.timeoutMode(timeoutMode) + } } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt index 3acd5581f1b..4563dfe4a4f 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.kotlin.client.coroutine.ListDatabasesFlow import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -41,4 +42,7 @@ data class SyncListDatabasesIterable(val wrapped: ListDatabasesFlow) override fun comment(comment: String?): SyncListDatabasesIterable = apply { wrapped.comment(comment) } override fun comment(comment: BsonValue?): SyncListDatabasesIterable = apply { wrapped.comment(comment) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListDatabasesIterable = apply { + wrapped.timeoutMode(timeoutMode) + } } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt index 030b89bb1bf..0e329c7bcdd 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.client.ListIndexesIterable as JListIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.kotlin.client.coroutine.ListIndexesFlow import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -28,4 +29,7 @@ data class SyncListIndexesIterable(val wrapped: ListIndexesFlow) : } override fun comment(comment: String?): SyncListIndexesIterable = apply { wrapped.comment(comment) } override fun comment(comment: BsonValue?): SyncListIndexesIterable = apply { wrapped.comment(comment) } + override fun timeoutMode(timeoutMode: 
TimeoutMode): SyncListIndexesIterable = apply { + wrapped.timeoutMode(timeoutMode) + } } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt index 62af2fe0c7c..a7df87779df 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.ExplainVerbosity import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.coroutine.ListSearchIndexesFlow import java.util.concurrent.TimeUnit @@ -45,6 +46,9 @@ internal class SyncListSearchIndexesIterable(val wrapped: ListSearchInd override fun comment(comment: String?): SyncListSearchIndexesIterable = apply { wrapped.comment(comment) } override fun comment(comment: BsonValue?): SyncListSearchIndexesIterable = apply { wrapped.comment(comment) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListSearchIndexesIterable = apply { + wrapped.timeoutMode(timeoutMode) + } override fun explain(): Document = runBlocking { wrapped.explain() } override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt index 9aab6ed51a6..8e5fc82455a 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt @@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.client.MapReduceIterable as JMapReduceIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.client.model.MapReduceAction import com.mongodb.kotlin.client.coroutine.MapReduceFlow @@ -57,4 +58,7 @@ data class SyncMapReduceIterable(val wrapped: MapReduceFlow) : } override fun collation(collation: Collation?): SyncMapReduceIterable = apply { wrapped.collation(collation) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncMapReduceIterable = apply { + wrapped.timeoutMode(timeoutMode) + } } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt index 9cf01ce186f..bfa48ef1e1c 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt @@ -15,76 +15,12 @@ */ package com.mongodb.kotlin.client.coroutine.syncadapter -import com.mongodb.ClientSessionOptions -import com.mongodb.client.ChangeStreamIterable 
-import com.mongodb.client.ClientSession -import com.mongodb.client.ListDatabasesIterable import com.mongodb.client.MongoClient as JMongoClient -import com.mongodb.client.MongoDatabase -import com.mongodb.client.MongoIterable import com.mongodb.connection.ClusterDescription import com.mongodb.kotlin.client.coroutine.MongoClient -import kotlinx.coroutines.runBlocking -import org.bson.Document -import org.bson.conversions.Bson -data class SyncMongoClient(val wrapped: MongoClient) : JMongoClient { +internal class SyncMongoClient(override val wrapped: MongoClient) : SyncMongoCluster(wrapped), JMongoClient { override fun close(): Unit = wrapped.close() - override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName)) - - override fun startSession(): ClientSession = SyncClientSession(runBlocking { wrapped.startSession() }, this) - - override fun startSession(options: ClientSessionOptions): ClientSession = - SyncClientSession(runBlocking { wrapped.startSession(options) }, this) - - override fun listDatabaseNames(): MongoIterable = SyncMongoIterable(wrapped.listDatabaseNames()) - - override fun listDatabaseNames(clientSession: ClientSession): MongoIterable = - SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped())) - - override fun listDatabases(): ListDatabasesIterable = SyncListDatabasesIterable(wrapped.listDatabases()) - - override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = - SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped())) - - override fun listDatabases(resultClass: Class): ListDatabasesIterable = - SyncListDatabasesIterable(wrapped.listDatabases(resultClass)) - - override fun listDatabases( - clientSession: ClientSession, - resultClass: Class - ): ListDatabasesIterable = - SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass)) - - override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) - - override fun watch(resultClass: Class): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) - - override fun watch(pipeline: MutableList): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(pipeline)) - - override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) - - override fun watch(clientSession: ClientSession): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) - - override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) - - override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) - - override fun watch( - clientSession: ClientSession, - pipeline: MutableList, - resultClass: Class - ): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) - override fun getClusterDescription(): ClusterDescription = wrapped.getClusterDescription() - - private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt 
b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt new file mode 100644 index 00000000000..42313ed2b13 --- /dev/null +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt @@ -0,0 +1,115 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine.syncadapter + +import com.mongodb.ClientSessionOptions +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.ChangeStreamIterable +import com.mongodb.client.ClientSession +import com.mongodb.client.ListDatabasesIterable +import com.mongodb.client.MongoCluster as JMongoCluster +import com.mongodb.client.MongoDatabase +import com.mongodb.client.MongoIterable +import com.mongodb.kotlin.client.coroutine.MongoCluster +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.runBlocking +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoCluster { + override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry + + override fun getReadPreference(): ReadPreference = wrapped.readPreference + + override fun getWriteConcern(): WriteConcern = wrapped.writeConcern + + override fun getReadConcern(): ReadConcern = wrapped.readConcern + + override fun getTimeout(timeUnit: TimeUnit): Long? 
= wrapped.timeout(timeUnit) + + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCluster = + SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + override fun withReadPreference(readPreference: ReadPreference): SyncMongoCluster = + SyncMongoCluster(wrapped.withReadPreference(readPreference)) + + override fun withReadConcern(readConcern: ReadConcern): SyncMongoCluster = + SyncMongoCluster(wrapped.withReadConcern(readConcern)) + + override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCluster = + SyncMongoCluster(wrapped.withWriteConcern(writeConcern)) + + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoCluster = + SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit)) + + override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName)) + + override fun startSession(): ClientSession = SyncClientSession(runBlocking { wrapped.startSession() }, this) + + override fun startSession(options: ClientSessionOptions): ClientSession = + SyncClientSession(runBlocking { wrapped.startSession(options) }, this) + + override fun listDatabaseNames(): MongoIterable = SyncMongoIterable(wrapped.listDatabaseNames()) + + override fun listDatabaseNames(clientSession: ClientSession): MongoIterable = + SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped())) + + override fun listDatabases(): ListDatabasesIterable = SyncListDatabasesIterable(wrapped.listDatabases()) + + override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped())) + + override fun listDatabases(resultClass: Class): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(resultClass)) + + override fun listDatabases( + clientSession: ClientSession, + resultClass: Class + ): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass)) + + override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) + + override fun watch(resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) + + override fun watch(pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline)) + + override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + override fun watch(clientSession: ClientSession): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) + + override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) + + override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) + + override fun watch( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped +} diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt 
b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt index 756c884608a..fa26fae86c1 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt @@ -55,6 +55,7 @@ import com.mongodb.client.result.InsertManyResult import com.mongodb.client.result.InsertOneResult import com.mongodb.client.result.UpdateResult import com.mongodb.kotlin.client.coroutine.MongoCollection +import java.util.concurrent.TimeUnit import kotlinx.coroutines.flow.toCollection import kotlinx.coroutines.runBlocking import org.bson.Document @@ -74,6 +75,7 @@ data class SyncMongoCollection(val wrapped: MongoCollection) : JMong override fun getWriteConcern(): WriteConcern = wrapped.writeConcern override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) override fun withDocumentClass(clazz: Class): SyncMongoCollection = SyncMongoCollection(wrapped.withDocumentClass(clazz)) @@ -90,6 +92,9 @@ data class SyncMongoCollection(val wrapped: MongoCollection) : JMong override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection = SyncMongoCollection(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection = + SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit)) + override fun countDocuments(): Long = runBlocking { wrapped.countDocuments() } override fun countDocuments(filter: Bson): Long = runBlocking { wrapped.countDocuments(filter) } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt index ee4c4d23040..ae83a1443b7 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt @@ -23,6 +23,7 @@ import com.mongodb.client.MongoDatabase as JMongoDatabase import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.CreateViewOptions import com.mongodb.kotlin.client.coroutine.MongoDatabase +import java.util.concurrent.TimeUnit import kotlinx.coroutines.runBlocking import org.bson.Document import org.bson.codecs.configuration.CodecRegistry @@ -39,6 +40,8 @@ data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? 
= wrapped.timeout(timeUnit) + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase = SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) @@ -51,6 +54,9 @@ data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase = SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + override fun getCollection(collectionName: String): MongoCollection = SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java)) diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt index e7a22506f0a..98ab0d93b75 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt @@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter import com.mongodb.Function import com.mongodb.client.MongoCursor import com.mongodb.client.MongoIterable as JMongoIterable +import com.mongodb.client.cursor.TimeoutMode import kotlinx.coroutines.flow.Flow import kotlinx.coroutines.flow.firstOrNull import kotlinx.coroutines.flow.map @@ -26,6 +27,7 @@ import kotlinx.coroutines.runBlocking open class SyncMongoIterable(private val delegate: Flow) : JMongoIterable { private var batchSize: Int? = null + private var timeoutMode: TimeoutMode? = null override fun iterator(): MongoCursor = cursor() diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt index 683746efc96..c8da59450ad 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt @@ -16,6 +16,9 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.AggregatePublisher import java.util.concurrent.TimeUnit @@ -45,6 +48,19 @@ public class AggregateFlow(private val wrapped: AggregatePublisher) */ public fun batchSize(batchSize: Int): AggregateFlow = apply { wrapped.batchSize(batchSize) } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): AggregateFlow = apply { wrapped.timeoutMode(timeoutMode) } + /** * Aggregates documents according to the specified aggregation pipeline, which must end with a $out or $merge stage. 
* @@ -167,7 +183,6 @@ public class AggregateFlow(private val wrapped: AggregatePublisher) /** * Explain the execution plan for this operation with the given verbosity level * - * @param R the type of the document class * @param verbosity the verbosity of the explanation * @return the execution plan * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/) diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt index 4a214d6282c..55bfeb82060 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt @@ -39,6 +39,15 @@ import org.bson.BsonValue */ public class ChangeStreamFlow(private val wrapped: ChangeStreamPublisher) : Flow> { + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ChangeStreamFlow = apply { wrapped.batchSize(batchSize) } + /** * Sets the fullDocument value. * @@ -68,15 +77,6 @@ public class ChangeStreamFlow(private val wrapped: ChangeStreamPublishe */ public fun resumeAfter(resumeToken: BsonDocument): ChangeStreamFlow = apply { wrapped.resumeAfter(resumeToken) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public fun batchSize(batchSize: Int): ChangeStreamFlow = apply { wrapped.batchSize(batchSize) } - /** * Sets the maximum await execution time on the server for this operation. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt index 6809b0b2777..6c53a1faf47 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt @@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.ClientSessionOptions import com.mongodb.ServerAddress import com.mongodb.TransactionOptions +import com.mongodb.internal.TimeoutContext import com.mongodb.reactivestreams.client.ClientSession as reactiveClientSession import com.mongodb.session.ClientSession as jClientSession import com.mongodb.session.ServerSession @@ -214,6 +215,18 @@ public class ClientSession(public val wrapped: reactiveClientSession) : jClientS public suspend fun abortTransaction() { wrapped.abortTransaction().awaitFirstOrNull() } + + /** + * Gets the timeout context to use with this session: + * * `MongoClientSettings#getTimeoutMS` + * * `ClientSessionOptions#getDefaultTimeout` + * + * Note: For internal use only + * + * @return the timeout to use + * @since 5.2 + */ + public override fun getTimeoutContext(): TimeoutContext? 
= wrapped.timeoutContext } /** diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt index 3583e4a2390..c65f7f6301c 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt @@ -15,6 +15,9 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.DistinctPublisher import java.util.concurrent.TimeUnit @@ -41,6 +44,19 @@ public class DistinctFlow(private val wrapped: DistinctPublisher) : */ public fun batchSize(batchSize: Int): DistinctFlow = apply { wrapped.batchSize(batchSize) } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): DistinctFlow = apply { wrapped.timeoutMode(timeoutMode) } + /** * Sets the query filter to apply to the query. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt index 49a391c236f..f0afb4e9937 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt @@ -17,6 +17,9 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.CursorType import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.FindPublisher import java.util.concurrent.TimeUnit @@ -45,6 +48,24 @@ public class FindFlow(private val wrapped: FindPublisher) : Flow */ public fun batchSize(batchSize: Int): FindFlow = apply { wrapped.batchSize(batchSize) } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * If the `timeout` is set then: + * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME] + * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and its an error to configure + * it as: [TimeoutMode.CURSOR_LIFETIME] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): FindFlow = apply { wrapped.timeoutMode(timeoutMode) } + /** * Sets the query filter to apply to the query. 
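Usage sketch (not part of the patch): the FindFlow KDoc above spells out the defaults, so explicitly requesting ITERATION on a tailable cursor is redundant but legal, whereas CURSOR_LIFETIME would be rejected. The capped collection "events" is assumed; the loop runs until the coroutine is cancelled.

    import com.mongodb.CursorType
    import com.mongodb.client.cursor.TimeoutMode
    import com.mongodb.kotlin.client.coroutine.MongoCollection
    import kotlinx.coroutines.flow.collect
    import org.bson.Document

    suspend fun tailEvents(events: MongoCollection<Document>) {
        events.find()
            .cursorType(CursorType.TailableAwait)
            .timeoutMode(TimeoutMode.ITERATION)   // default for tailable cursors when timeoutMS is set
            .collect { println(it) }
    }
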
* @@ -250,7 +271,6 @@ public class FindFlow(private val wrapped: FindPublisher) : Flow /** * Explain the execution plan for this operation with the given verbosity level * - * @param R the type of the document class * @param verbosity the verbosity of the explanation * @return the execution plan * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/) diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt index bc205b7073f..a6dfd770e08 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt @@ -15,6 +15,9 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListCollectionsPublisher import java.util.concurrent.TimeUnit import kotlinx.coroutines.flow.Flow @@ -31,6 +34,31 @@ import org.bson.conversions.Bson */ public class ListCollectionsFlow(private val wrapped: ListCollectionsPublisher) : Flow by wrapped.asFlow() { + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListCollectionsFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListCollectionsFlow = apply { + wrapped.timeoutMode(timeoutMode) + } + /** * Sets the maximum execution time on the server for this operation. * @@ -43,15 +71,6 @@ public class ListCollectionsFlow(private val wrapped: ListCollectionsPu wrapped.maxTime(maxTime, timeUnit) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public fun batchSize(batchSize: Int): ListCollectionsFlow = apply { wrapped.batchSize(batchSize) } - /** * Sets the query filter to apply to the returned database names. 
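Usage sketch (not part of the patch): the same option is surfaced on the list helpers in this hunk and the next ones (ListCollectionsFlow, ListDatabasesFlow, ListIndexesFlow). The database handle and 2-second budget are placeholders.

    import com.mongodb.client.cursor.TimeoutMode
    import com.mongodb.kotlin.client.coroutine.MongoDatabase
    import java.util.concurrent.TimeUnit
    import kotlinx.coroutines.flow.toList
    import org.bson.Document

    suspend fun collectionInfo(database: MongoDatabase): List<Document> =
        database.withTimeout(2, TimeUnit.SECONDS)
            .listCollections()
            .batchSize(50)
            .timeoutMode(TimeoutMode.CURSOR_LIFETIME)
            .toList()
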
* diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt index 4b56333bb38..473cde087b6 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt @@ -15,6 +15,9 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListDatabasesPublisher import java.util.concurrent.TimeUnit import kotlinx.coroutines.flow.Flow @@ -30,6 +33,29 @@ import org.bson.conversions.Bson * @see [List databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases/) */ public class ListDatabasesFlow(private val wrapped: ListDatabasesPublisher) : Flow by wrapped.asFlow() { + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListDatabasesFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListDatabasesFlow = apply { wrapped.timeoutMode(timeoutMode) } + /** * Sets the maximum execution time on the server for this operation. * @@ -42,15 +68,6 @@ public class ListDatabasesFlow(private val wrapped: ListDatabasesPublis wrapped.maxTime(maxTime, timeUnit) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public fun batchSize(batchSize: Int): ListDatabasesFlow = apply { wrapped.batchSize(batchSize) } - /** * Sets the query filter to apply to the returned database names. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt index 9e856d28ee3..b92453158a1 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt @@ -15,6 +15,9 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListIndexesPublisher import java.util.concurrent.TimeUnit import kotlinx.coroutines.flow.Flow @@ -29,6 +32,29 @@ import org.bson.BsonValue * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/) */ public class ListIndexesFlow(private val wrapped: ListIndexesPublisher) : Flow by wrapped.asFlow() { + + /** + * Sets the number of documents to return per batch. 
+ * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListIndexesFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListIndexesFlow = apply { wrapped.timeoutMode(timeoutMode) } + /** * Sets the maximum execution time on the server for this operation. * @@ -41,15 +67,6 @@ public class ListIndexesFlow(private val wrapped: ListIndexesPublisher< wrapped.maxTime(maxTime, timeUnit) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public fun batchSize(batchSize: Int): ListIndexesFlow = apply { wrapped.batchSize(batchSize) } - /** * Sets the comment for this operation. A null value means no comment is set. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt index ce355c69e41..1c7fe4ded5e 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt @@ -16,6 +16,9 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher import java.util.concurrent.TimeUnit @@ -36,6 +39,30 @@ import org.bson.Document public class ListSearchIndexesFlow(private val wrapped: ListSearchIndexesPublisher) : Flow by wrapped.asFlow() { + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListSearchIndexesFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesFlow = apply { + wrapped.timeoutMode(timeoutMode) + } + /** * Sets an Atlas Search index name for this operation. * @@ -55,15 +82,6 @@ public class ListSearchIndexesFlow(private val wrapped: ListSearchIndex wrapped.allowDiskUse(allowDiskUse) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size. - * @return this. 
- * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public fun batchSize(batchSize: Int): ListSearchIndexesFlow = apply { wrapped.batchSize(batchSize) } - /** * Sets the maximum execution time on the server for this operation. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt index 1849f9ae92f..407f1b8fe39 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt @@ -17,6 +17,9 @@ package com.mongodb.kotlin.client.coroutine +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.client.model.MapReduceAction import com.mongodb.reactivestreams.client.MapReducePublisher @@ -37,6 +40,7 @@ import org.bson.conversions.Bson */ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith("")) public class MapReduceFlow(private val wrapped: MapReducePublisher) : Flow by wrapped.asFlow() { + /** * Sets the number of documents to return per batch. * @@ -46,6 +50,19 @@ public class MapReduceFlow(private val wrapped: MapReducePublisher) */ public fun batchSize(batchSize: Int): MapReduceFlow = apply { wrapped.batchSize(batchSize) } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): MapReduceFlow = apply { wrapped.timeoutMode(timeoutMode) } + /** * Aggregates documents to a collection according to the specified map-reduce function with the given options, which * must specify a non-inline result. diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt index fc97c2e3bb4..c4c2acc27f6 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt @@ -24,11 +24,7 @@ import com.mongodb.lang.Nullable import com.mongodb.reactivestreams.client.MongoClient as JMongoClient import com.mongodb.reactivestreams.client.MongoClients as JMongoClients import java.io.Closeable -import kotlinx.coroutines.flow.Flow -import kotlinx.coroutines.reactive.asFlow -import kotlinx.coroutines.reactive.awaitSingle -import org.bson.Document -import org.bson.conversions.Bson +import java.util.concurrent.TimeUnit /** * A client-side representation of a MongoDB cluster. @@ -42,7 +38,7 @@ import org.bson.conversions.Bson * * @see MongoClient.create */ -public class MongoClient(private val wrapped: JMongoClient) : Closeable { +public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapped), Closeable { /** * A factory for [MongoClient] instances. 
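Note (not part of the patch): because MongoClient now extends the new MongoCluster base class introduced later in this series, the cluster-level surface is inherited rather than declared on the client, so existing call sites compile unchanged. A minimal sketch:

    import com.mongodb.kotlin.client.coroutine.MongoClient
    import com.mongodb.kotlin.client.coroutine.MongoCluster
    import kotlinx.coroutines.flow.toList

    suspend fun databaseNames(client: MongoClient): List<String> {
        // listDatabaseNames(), listDatabases(), watch() and startSession() now resolve on MongoCluster.
        val cluster: MongoCluster = client
        return cluster.listDatabaseNames().toList()
    }
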
@@ -112,176 +108,13 @@ public class MongoClient(private val wrapped: JMongoClient) : Closeable { * @see com.mongodb.MongoClientSettings.Builder.applyToClusterSettings */ public fun getClusterDescription(): ClusterDescription = wrapped.clusterDescription - - /** - * Gets a [MongoDatabase] instance for the given database name. - * - * @param databaseName the name of the database to retrievecom.mongodb.connection. - * @return a `MongoDatabase` representing the specified database - * @throws IllegalArgumentException if databaseName is invalid - * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity - */ - public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName)) - - /** - * Creates a client session. - * - * Note: A ClientSession instance can not be used concurrently in multiple operations. - * - * @param options the options for the client session - * @return the client session - */ - public suspend fun startSession( - options: ClientSessionOptions = ClientSessionOptions.builder().build() - ): ClientSession = ClientSession(wrapped.startSession(options).awaitSingle()) - - /** - * Get a list of the database names - * - * @return an iterable containing all the names of all the databases - * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) - */ - public fun listDatabaseNames(): Flow = wrapped.listDatabaseNames().asFlow() - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @return the list databases iterable interface - * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) - */ - public fun listDatabaseNames(clientSession: ClientSession): Flow = - wrapped.listDatabaseNames(clientSession.wrapped).asFlow() - - /** - * Gets the list of databases - * - * @return the list databases iterable interface - */ - @JvmName("listDatabasesAsDocument") - public fun listDatabases(): ListDatabasesFlow = listDatabases() - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @return the list databases iterable interface - */ - @JvmName("listDatabasesAsDocumentWithSession") - public fun listDatabases(clientSession: ClientSession): ListDatabasesFlow = - listDatabases(clientSession) - - /** - * Gets the list of databases - * - * @param T the type of the class to use - * @param resultClass the target document type of the iterable. - * @return the list databases iterable interface - */ - public fun listDatabases(resultClass: Class): ListDatabasesFlow = - ListDatabasesFlow(wrapped.listDatabases(resultClass)) - - /** - * Gets the list of databases - * - * @param T the type of the class to use - * @param clientSession the client session with which to associate this operation - * @param resultClass the target document type of the iterable. 
- * @return the list databases iterable interface - */ - public fun listDatabases(clientSession: ClientSession, resultClass: Class): ListDatabasesFlow = - ListDatabasesFlow(wrapped.listDatabases(clientSession.wrapped, resultClass)) - - /** - * Gets the list of databases - * - * @param T the type of the class to use - * @return the list databases iterable interface - */ - public inline fun listDatabases(): ListDatabasesFlow = listDatabases(T::class.java) - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @param T the type of the class to use - * @return the list databases iterable interface - */ - public inline fun listDatabases(clientSession: ClientSession): ListDatabasesFlow = - listDatabases(clientSession, T::class.java) - - /** - * Creates a change stream for this client. - * - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - @JvmName("watchAsDocument") - public fun watch(pipeline: List = emptyList()): ChangeStreamFlow = watch(pipeline) - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - @JvmName("watchAsDocumentWithSession") - public fun watch(clientSession: ClientSession, pipeline: List = emptyList()): ChangeStreamFlow = - watch(clientSession, pipeline) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @param resultClass the target document type of the iterable. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public fun watch(pipeline: List = emptyList(), resultClass: Class): ChangeStreamFlow = - ChangeStreamFlow(wrapped.watch(pipeline, resultClass)) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @param resultClass the target document type of the iterable. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public fun watch( - clientSession: ClientSession, - pipeline: List = emptyList(), - resultClass: Class - ): ChangeStreamFlow = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass)) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public inline fun watch(pipeline: List = emptyList()): ChangeStreamFlow = - watch(pipeline, T::class.java) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. 
- * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public inline fun watch( - clientSession: ClientSession, - pipeline: List = emptyList() - ): ChangeStreamFlow = watch(clientSession, pipeline, T::class.java) } + +/** + * ClientSessionOptions.Builder.defaultTimeout extension function + * + * @param defaultTimeout time in milliseconds + * @return the options + */ +public fun ClientSessionOptions.Builder.defaultTimeout(defaultTimeout: Long): ClientSessionOptions.Builder = + this.apply { defaultTimeout(defaultTimeout, TimeUnit.MILLISECONDS) } diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt new file mode 100644 index 00000000000..88df39dd23d --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt @@ -0,0 +1,310 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.ClientSessionOptions +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.reactivestreams.client.MongoCluster as JMongoCluster +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.reactive.asFlow +import kotlinx.coroutines.reactive.awaitSingle +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +/** + * The client-side representation of a MongoDB cluster operations. + * + * The originating [MongoClient] is responsible for the closing of resources. If the originator [MongoClient] is closed, + * then any operations will fail. + * + * @see MongoClient + * @since 5.2 + */ +public open class MongoCluster protected constructor(private val wrapped: JMongoCluster) { + + /** The codec registry. */ + public val codecRegistry: CodecRegistry + get() = wrapped.codecRegistry + + /** The read concern. */ + public val readConcern: ReadConcern + get() = wrapped.readConcern + + /** The read preference. */ + public val readPreference: ReadPreference + get() = wrapped.readPreference + + /** The write concern. */ + public val writeConcern: WriteConcern + get() = wrapped.writeConcern + + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. 
+ * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + + /** + * Create a new MongoCluster instance with a different codec registry. + * + * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of + * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining + * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances + * using the factory methods in [org.bson.codecs.configuration.CodecRegistries]. + * + * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCluster = + MongoCluster(wrapped.withCodecRegistry(newCodecRegistry)) + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param newReadPreference the new [ReadPreference] for the database + * @return a new MongoCluster instance with the different readPreference + */ + public fun withReadPreference(newReadPreference: ReadPreference): MongoCluster = + MongoCluster(wrapped.withReadPreference(newReadPreference)) + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param newReadConcern the new [ReadConcern] for the database + * @return a new MongoCluster instance with the different ReadConcern + * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/) + */ + public fun withReadConcern(newReadConcern: ReadConcern): MongoCluster = + MongoCluster(wrapped.withReadConcern(newReadConcern)) + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param newWriteConcern the new [WriteConcern] for the database + * @return a new MongoCluster instance with the different writeConcern + */ + public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCluster = + MongoCluster(wrapped.withWriteConcern(newWriteConcern)) + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. 
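Usage sketch (not part of the patch): how the new MongoCluster timeout accessors compose. The 3-second value and "test" database name are placeholders; databases obtained from the derived cluster carry the same setting.

    import com.mongodb.kotlin.client.coroutine.MongoClient
    import java.util.concurrent.TimeUnit

    fun describeTimeouts(client: MongoClient) {
        val timed = client.withTimeout(3, TimeUnit.SECONDS)        // a MongoCluster view with timeoutMS = 3000
        println(timed.timeout(TimeUnit.MILLISECONDS))              // 3000
        println(client.timeout())                                  // null if no client-wide timeout was configured
        println(timed.getDatabase("test").timeout(TimeUnit.MILLISECONDS))
    }
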
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCluster instance with the set time limit for operations + * @see [MongoDatabase.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCluster = + MongoCluster(wrapped.withTimeout(timeout, timeUnit)) + + /** + * Gets a [MongoDatabase] instance for the given database name. + * + * @param databaseName the name of the database to retrieve + * @return a `MongoDatabase` representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity + */ + public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName)) + + /** + * Creates a client session. + * + * Note: A ClientSession instance can not be used concurrently in multiple operations. + * + * @param options the options for the client session + * @return the client session + */ + public suspend fun startSession( + options: ClientSessionOptions = ClientSessionOptions.builder().build() + ): ClientSession = ClientSession(wrapped.startSession(options).awaitSingle()) + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) + */ + public fun listDatabaseNames(): Flow = wrapped.listDatabaseNames().asFlow() + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) + */ + public fun listDatabaseNames(clientSession: ClientSession): Flow = + wrapped.listDatabaseNames(clientSession.wrapped).asFlow() + + /** + * Gets the list of databases + * + * @return the list databases iterable interface + */ + @JvmName("listDatabasesAsDocument") + public fun listDatabases(): ListDatabasesFlow = listDatabases() + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + */ + @JvmName("listDatabasesAsDocumentWithSession") + public fun listDatabases(clientSession: ClientSession): ListDatabasesFlow = + listDatabases(clientSession) + + /** + * Gets the list of databases + * + * @param T the type of the class to use + * @param resultClass the target document type of the iterable. + * @return the list databases iterable interface + */ + public fun listDatabases(resultClass: Class): ListDatabasesFlow = + ListDatabasesFlow(wrapped.listDatabases(resultClass)) + + /** + * Gets the list of databases + * + * @param T the type of the class to use + * @param clientSession the client session with which to associate this operation + * @param resultClass the target document type of the iterable. 
+ * @return the list databases iterable interface + */ + public fun listDatabases(clientSession: ClientSession, resultClass: Class): ListDatabasesFlow = + ListDatabasesFlow(wrapped.listDatabases(clientSession.wrapped, resultClass)) + + /** + * Gets the list of databases + * + * @param T the type of the class to use + * @return the list databases iterable interface + */ + public inline fun listDatabases(): ListDatabasesFlow = listDatabases(T::class.java) + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param T the type of the class to use + * @return the list databases iterable interface + */ + public inline fun listDatabases(clientSession: ClientSession): ListDatabasesFlow = + listDatabases(clientSession, T::class.java) + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + @JvmName("watchAsDocument") + public fun watch(pipeline: List = emptyList()): ChangeStreamFlow = watch(pipeline) + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + @JvmName("watchAsDocumentWithSession") + public fun watch(clientSession: ClientSession, pipeline: List = emptyList()): ChangeStreamFlow = + watch(clientSession, pipeline) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public fun watch(pipeline: List = emptyList(), resultClass: Class): ChangeStreamFlow = + ChangeStreamFlow(wrapped.watch(pipeline, resultClass)) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public fun watch( + clientSession: ClientSession, + pipeline: List = emptyList(), + resultClass: Class + ): ChangeStreamFlow = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass)) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public inline fun watch(pipeline: List = emptyList()): ChangeStreamFlow = + watch(pipeline, T::class.java) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. 
+ * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public inline fun watch( + clientSession: ClientSession, + pipeline: List = emptyList() + ): ChangeStreamFlow = watch(clientSession, pipeline, T::class.java) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt index b1026c359f9..5602b5ecd11 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt @@ -19,6 +19,8 @@ import com.mongodb.MongoNamespace import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.bulk.BulkWriteResult import com.mongodb.client.model.BulkWriteOptions import com.mongodb.client.model.CountOptions @@ -87,6 +89,28 @@ public class MongoCollection(private val wrapped: JMongoCollection) public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new collection instance with a different default class to cast any documents returned from the database * into. @@ -150,6 +174,21 @@ public class MongoCollection(private val wrapped: JMongoCollection) public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection = MongoCollection(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. 
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCollection instance with the set time limit for operations + * @see [MongoCollection.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection = + MongoCollection(wrapped.withTimeout(timeout, timeUnit)) + /** * Counts the number of documents in the collection. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt index bf40401a0a1..007251bab31 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt @@ -18,6 +18,8 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.CreateViewOptions import com.mongodb.reactivestreams.client.MongoDatabase as JMongoDatabase @@ -55,6 +57,28 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -98,6 +122,21 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase = MongoDatabase(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. 
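For reference, a minimal coroutine sketch of the collection-level accessors added just above: timeout(TimeUnit) reads the effective limit and withTimeout derives a collection with its own budget. Database and collection names are placeholders.

import com.mongodb.kotlin.client.coroutine.MongoClient
import java.util.concurrent.TimeUnit
import kotlinx.coroutines.runBlocking
import org.bson.Document

fun main() = runBlocking {
    MongoClient.create("mongodb://localhost:27017").use { client ->
        val collection = client.getDatabase("test").getCollection<Document>("events")

        // timeout() returns null when no timeoutMS has been configured anywhere
        // up the chain (client, database or collection).
        println("inherited timeout: ${collection.timeout(TimeUnit.MILLISECONDS)}")

        // Derive a view whose full operation execution (server selection, retries,
        // the command itself) must finish within 2 seconds; 0 would mean no limit.
        val strict = collection.withTimeout(2, TimeUnit.SECONDS)
        strict.insertOne(Document("status", "ok"))
    }
}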
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoDatabase instance with the set time limit for operations + * @see [MongoDatabase.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase = + MongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + /** * Gets a collection. * @@ -150,6 +189,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param T the class to decode each document into * @param command the command to be run * @param readPreference the [ReadPreference] to be used when executing the command, defaults to @@ -166,6 +208,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param T the class to decode each document into * @param clientSession the client session with which to associate this operation * @param command the command to be run @@ -184,6 +229,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param T the class to decode each document into * @param command the command to be run * @param readPreference the [ReadPreference] to be used when executing the command, defaults to @@ -198,6 +246,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. 
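The repeated runCommand notes above boil down to: once timeoutMS is in play, let the driver derive the server-side limit rather than putting maxTimeMS into the command document. A hedged sketch of that pattern (the ping command and names are placeholders):

import com.mongodb.kotlin.client.coroutine.MongoClient
import java.util.concurrent.TimeUnit
import kotlinx.coroutines.runBlocking
import org.bson.Document

fun main() = runBlocking {
    MongoClient.create("mongodb://localhost:27017").use { client ->
        // Give commands against this database a one second budget.
        val admin = client.getDatabase("admin").withTimeout(1, TimeUnit.SECONDS)

        // With timeoutMS set, do not also put maxTimeMS into the command document:
        // per the notes above, combining the two is undefined behaviour.
        val reply = admin.runCommand<Document>(Document("ping", 1))
        println(reply.toJson())
    }
}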
+ * * @param T the class to decode each document into * @param clientSession the client session with which to associate this operation * @param command the command to be run diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt index cf8ebaa02cf..07953277d5a 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.ExplainVerbosity +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.AggregatePublisher import java.util.concurrent.TimeUnit @@ -71,6 +72,7 @@ class AggregateFlowTest { flow.maxAwaitTime(1, TimeUnit.SECONDS) flow.maxTime(1) flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).allowDiskUse(true) verify(wrapped).batchSize(batchSize) @@ -85,6 +87,7 @@ class AggregateFlowTest { verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) verify(wrapped).let(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) whenever(wrapped.explain(Document::class.java)).doReturn(Mono.fromCallable { Document() }) whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(Mono.fromCallable { Document() }) diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt index fa3b25f92dd..571c6f579bb 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt @@ -15,6 +15,7 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.DistinctPublisher import java.util.concurrent.TimeUnit @@ -55,6 +56,7 @@ class DistinctFlowTest { flow.filter(filter) flow.maxTime(1) flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).collation(collation) @@ -63,6 +65,7 @@ class DistinctFlowTest { verify(wrapped).filter(filter) verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt index 9243748f1af..ae4f13639eb 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt @@ -29,6 +29,7 @@ class ExtensionMethodsTest { "CountOptions", "CreateCollectionOptions", "CreateIndexOptions", + "ClientSessionOptions", "DropIndexOptions", "EstimatedDocumentCountOptions", "FindOneAndDeleteOptions", diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt 
b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt index 2216c044883..450059c8211 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.CursorType import com.mongodb.ExplainVerbosity +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.reactivestreams.client.FindPublisher import java.util.concurrent.TimeUnit @@ -78,6 +79,7 @@ class FindFlowTest { flow.showRecordId(true) flow.skip(1) flow.sort(bson) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).allowDiskUse(true) verify(wrapped).batchSize(batchSize) @@ -103,6 +105,7 @@ class FindFlowTest { verify(wrapped).showRecordId(true) verify(wrapped).skip(1) verify(wrapped).sort(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) whenever(wrapped.explain(Document::class.java)).doReturn(Mono.fromCallable { Document() }) whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(Mono.fromCallable { Document() }) diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt index a84b4990129..c2aa221c98e 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt @@ -38,6 +38,7 @@ class ListCollectionNamesFlowTest { } @Test + @Suppress("DEPRECATION") fun shouldCallTheUnderlyingMethods() { val wrapped: ListCollectionNamesPublisher = mock() val flow = ListCollectionNamesFlow(wrapped) diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt index 98d16113ff9..59c6f896c86 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt @@ -15,6 +15,7 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListCollectionsPublisher import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions @@ -54,6 +55,7 @@ class ListCollectionsFlowTest { flow.filter(filter) flow.maxTime(1) flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).comment(bsonComment) @@ -61,6 +63,7 @@ class ListCollectionsFlowTest { verify(wrapped).filter(filter) verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt index 53e44f740f1..eac18960b3f 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt +++ 
b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt @@ -15,6 +15,7 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListDatabasesPublisher import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions @@ -55,6 +56,7 @@ class ListDatabasesFlowTest { flow.maxTime(1) flow.maxTime(1, TimeUnit.SECONDS) flow.nameOnly(true) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).authorizedDatabasesOnly(true) verify(wrapped).batchSize(batchSize) @@ -64,6 +66,7 @@ class ListDatabasesFlowTest { verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) verify(wrapped).nameOnly(true) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt index 69287d1918d..d84765d428b 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt @@ -15,6 +15,7 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListIndexesPublisher import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions @@ -50,12 +51,14 @@ class ListIndexesFlowTest { flow.comment(comment) flow.maxTime(1) flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).comment(bsonComment) verify(wrapped).comment(comment) verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt index 440566fcae8..b9ef9133e87 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.client.model.MapReduceAction import com.mongodb.reactivestreams.client.MapReducePublisher @@ -71,6 +72,7 @@ class MapReduceFlowTest { flow.sort(bson) flow.verbose(true) flow.action(MapReduceAction.MERGE) + flow.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).bypassDocumentValidation(true) @@ -87,6 +89,7 @@ class MapReduceFlowTest { verify(wrapped).sort(bson) verify(wrapped).verbose(true) verify(wrapped).action(MapReduceAction.MERGE) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) whenever(wrapped.toCollection()).doReturn(Mono.empty()) runBlocking { flow.toCollection() } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt index e8e121f85dc..7be5c068a84 100644 --- 
a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt @@ -72,7 +72,16 @@ class MongoCollectionTest { fun shouldHaveTheSameMethods() { val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() val kMongoCollectionFunctions = - MongoCollection::class.declaredFunctions.map { it.name }.toSet() + + MongoCollection::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoCollection::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt index 4ba7502bd24..031e2e6d1ef 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt @@ -54,7 +54,16 @@ class MongoDatabaseTest { fun shouldHaveTheSameMethods() { val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet() val kMongoDatabaseFunctions = - MongoDatabase::class.declaredFunctions.map { it.name }.toSet() + + MongoDatabase::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoDatabase::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt index 2640e6250d7..b563c67c368 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.ExplainVerbosity import com.mongodb.client.AggregateIterable as JAggregateIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.AggregateIterable import java.util.concurrent.TimeUnit @@ -27,6 +28,9 @@ import org.bson.conversions.Bson internal class SyncAggregateIterable(val wrapped: AggregateIterable) : JAggregateIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncAggregateIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncAggregateIterable = apply { + wrapped.timeoutMode(timeoutMode) + } override fun toCollection() = wrapped.toCollection() diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt index 53d791bd423..64cd27b776f 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt @@ -20,6 +20,7 @@ import com.mongodb.ServerAddress import com.mongodb.TransactionOptions import com.mongodb.client.ClientSession as JClientSession 
import com.mongodb.client.TransactionBody +import com.mongodb.internal.TimeoutContext import com.mongodb.kotlin.client.ClientSession import com.mongodb.session.ServerSession import org.bson.BsonDocument @@ -90,4 +91,6 @@ internal class SyncClientSession(internal val wrapped: ClientSession, private va override fun withTransaction(transactionBody: TransactionBody, options: TransactionOptions): T = throw UnsupportedOperationException() + + override fun getTimeoutContext(): TimeoutContext = throw UnsupportedOperationException() } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt index ef580954e20..91cf8165a3a 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.client.DistinctIterable as JDistinctIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.DistinctIterable import java.util.concurrent.TimeUnit @@ -25,6 +26,9 @@ import org.bson.conversions.Bson internal class SyncDistinctIterable(val wrapped: DistinctIterable) : JDistinctIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncDistinctIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncDistinctIterable = apply { + wrapped.timeoutMode(timeoutMode) + } override fun filter(filter: Bson?): SyncDistinctIterable = apply { wrapped.filter(filter) } override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncDistinctIterable = apply { wrapped.maxTime(maxTime, timeUnit) diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt index f179f4ff6bc..81247aeb2a0 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt @@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.CursorType import com.mongodb.ExplainVerbosity import com.mongodb.client.FindIterable as JFindIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.FindIterable import java.util.concurrent.TimeUnit @@ -28,6 +29,7 @@ import org.bson.conversions.Bson internal class SyncFindIterable(val wrapped: FindIterable) : JFindIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncFindIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncFindIterable = apply { wrapped.timeoutMode(timeoutMode) } override fun filter(filter: Bson?): SyncFindIterable = apply { wrapped.filter(filter) } override fun limit(limit: Int): SyncFindIterable = apply { wrapped.limit(limit) } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt index 74579b15a20..f38e7eed5e7 
100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.kotlin.client.ListCollectionsIterable import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -25,6 +26,9 @@ internal class SyncListCollectionsIterable(val wrapped: ListCollections JListCollectionsIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncListCollectionsIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListCollectionsIterable = apply { + wrapped.timeoutMode(timeoutMode) + } override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionsIterable = apply { wrapped.maxTime(maxTime, timeUnit) diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt index 2e0e662a65d..34874827826 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.kotlin.client.ListDatabasesIterable import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -25,6 +26,9 @@ internal class SyncListDatabasesIterable(val wrapped: ListDatabasesIter JListDatabasesIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncListDatabasesIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListDatabasesIterable = apply { + wrapped.timeoutMode(timeoutMode) + } override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListDatabasesIterable = apply { wrapped.maxTime(maxTime, timeUnit) diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt index b9133970cb3..56e5fec91cd 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.client.ListIndexesIterable as JListIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.kotlin.client.ListIndexesIterable import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -23,6 +24,9 @@ import org.bson.BsonValue internal class SyncListIndexesIterable(val wrapped: ListIndexesIterable) : JListIndexesIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncListIndexesIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListIndexesIterable = apply { + 
wrapped.timeoutMode(timeoutMode) + } override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListIndexesIterable = apply { wrapped.maxTime(maxTime, timeUnit) } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt index c63a249eeb0..b0e6d522b7e 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.syncadapter import com.mongodb.ExplainVerbosity import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.kotlin.client.ListSearchIndexesIterable import java.util.concurrent.TimeUnit @@ -26,6 +27,9 @@ import org.bson.Document internal class SyncListSearchIndexesIterable(val wrapped: ListSearchIndexesIterable) : JListSearchIndexesIterable, SyncMongoIterable(wrapped) { override fun batchSize(batchSize: Int): SyncListSearchIndexesIterable = apply { wrapped.batchSize(batchSize) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncListSearchIndexesIterable = apply { + wrapped.timeoutMode(timeoutMode) + } override fun name(indexName: String): SyncListSearchIndexesIterable = apply { wrapped.name(indexName) } override fun allowDiskUse(allowDiskUse: Boolean?): com.mongodb.client.ListSearchIndexesIterable = apply { diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt index 9c3af8af290..16660562a33 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt @@ -15,75 +15,12 @@ */ package com.mongodb.kotlin.client.syncadapter -import com.mongodb.ClientSessionOptions -import com.mongodb.client.ChangeStreamIterable -import com.mongodb.client.ClientSession -import com.mongodb.client.ListDatabasesIterable import com.mongodb.client.MongoClient as JMongoClient -import com.mongodb.client.MongoDatabase -import com.mongodb.client.MongoIterable import com.mongodb.connection.ClusterDescription import com.mongodb.kotlin.client.MongoClient -import org.bson.Document -import org.bson.conversions.Bson -internal class SyncMongoClient(val wrapped: MongoClient) : JMongoClient { +internal class SyncMongoClient(override val wrapped: MongoClient) : SyncMongoCluster(wrapped), JMongoClient { override fun close(): Unit = wrapped.close() - override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName)) - - override fun startSession(): ClientSession = SyncClientSession(wrapped.startSession(), this) - - override fun startSession(options: ClientSessionOptions): ClientSession = - SyncClientSession(wrapped.startSession(options), this) - - override fun listDatabaseNames(): MongoIterable = SyncMongoIterable(wrapped.listDatabaseNames()) - - override fun listDatabaseNames(clientSession: ClientSession): MongoIterable = - SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped())) - - override fun 
listDatabases(): ListDatabasesIterable = SyncListDatabasesIterable(wrapped.listDatabases()) - - override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = - SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped())) - - override fun listDatabases(resultClass: Class): ListDatabasesIterable = - SyncListDatabasesIterable(wrapped.listDatabases(resultClass)) - - override fun listDatabases( - clientSession: ClientSession, - resultClass: Class - ): ListDatabasesIterable = - SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass)) - - override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) - - override fun watch(resultClass: Class): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) - - override fun watch(pipeline: MutableList): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(pipeline)) - - override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) - - override fun watch(clientSession: ClientSession): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) - - override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) - - override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) - - override fun watch( - clientSession: ClientSession, - pipeline: MutableList, - resultClass: Class - ): ChangeStreamIterable = - SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) - override fun getClusterDescription(): ClusterDescription = wrapped.clusterDescription - - private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt new file mode 100644 index 00000000000..7b948fa6d1d --- /dev/null +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.syncadapter + +import com.mongodb.ClientSessionOptions +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.ChangeStreamIterable +import com.mongodb.client.ClientSession +import com.mongodb.client.ListDatabasesIterable +import com.mongodb.client.MongoCluster as JMongoCluster +import com.mongodb.client.MongoDatabase +import com.mongodb.client.MongoIterable +import com.mongodb.kotlin.client.MongoCluster +import java.util.concurrent.TimeUnit +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoCluster { + override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry + + override fun getReadPreference(): ReadPreference = wrapped.readPreference + + override fun getWriteConcern(): WriteConcern = wrapped.writeConcern + + override fun getReadConcern(): ReadConcern = wrapped.readConcern + + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) + + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCluster = + SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + override fun withReadPreference(readPreference: ReadPreference): SyncMongoCluster = + SyncMongoCluster(wrapped.withReadPreference(readPreference)) + + override fun withReadConcern(readConcern: ReadConcern): SyncMongoCluster = + SyncMongoCluster(wrapped.withReadConcern(readConcern)) + + override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCluster = + SyncMongoCluster(wrapped.withWriteConcern(writeConcern)) + + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoCluster = + SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit)) + + override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName)) + + override fun startSession(): ClientSession = SyncClientSession(wrapped.startSession(), this) + + override fun startSession(options: ClientSessionOptions): ClientSession = + SyncClientSession(wrapped.startSession(options), this) + + override fun listDatabaseNames(): MongoIterable = SyncMongoIterable(wrapped.listDatabaseNames()) + + override fun listDatabaseNames(clientSession: ClientSession): MongoIterable = + SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped())) + + override fun listDatabases(): ListDatabasesIterable = SyncListDatabasesIterable(wrapped.listDatabases()) + + override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped())) + + override fun listDatabases(resultClass: Class): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(resultClass)) + + override fun listDatabases( + clientSession: ClientSession, + resultClass: Class + ): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass)) + + override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) + + override fun watch(resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) + + override fun watch(pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline)) + + override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = + 
SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + override fun watch(clientSession: ClientSession): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) + + override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) + + override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) + + override fun watch( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped +} diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt index 952b05d32e5..51c3a7db7e1 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt @@ -56,6 +56,7 @@ import com.mongodb.client.result.InsertOneResult import com.mongodb.client.result.UpdateResult import com.mongodb.kotlin.client.MongoCollection import java.lang.UnsupportedOperationException +import java.util.concurrent.TimeUnit import org.bson.Document import org.bson.codecs.configuration.CodecRegistry import org.bson.conversions.Bson @@ -73,6 +74,7 @@ internal class SyncMongoCollection(val wrapped: MongoCollection) : J override fun getWriteConcern(): WriteConcern = wrapped.writeConcern override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? 
= wrapped.timeout(timeUnit) override fun withDocumentClass(clazz: Class): SyncMongoCollection = SyncMongoCollection(wrapped.withDocumentClass(clazz)) @@ -89,6 +91,9 @@ internal class SyncMongoCollection(val wrapped: MongoCollection) : J override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection = SyncMongoCollection(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection = + SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit)) + override fun countDocuments(): Long = wrapped.countDocuments() override fun countDocuments(filter: Bson): Long = wrapped.countDocuments(filter) diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt index 84a97bc2769..1111ee282ca 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt @@ -23,6 +23,7 @@ import com.mongodb.client.MongoDatabase as JMongoDatabase import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.CreateViewOptions import com.mongodb.kotlin.client.MongoDatabase +import java.util.concurrent.TimeUnit import org.bson.Document import org.bson.codecs.configuration.CodecRegistry import org.bson.conversions.Bson @@ -38,6 +39,8 @@ internal class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase = SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) @@ -50,6 +53,9 @@ internal class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase = SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + override fun getCollection(collectionName: String): MongoCollection = SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java)) diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt index 4940cad99d0..b5449a14645 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt @@ -16,7 +16,10 @@ package com.mongodb.kotlin.client import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.AggregateIterable as JAggregateIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -30,14 +33,32 @@ import org.bson.conversions.Bson * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate) */ public class AggregateIterable(private val wrapped: JAggregateIterable) : MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): AggregateIterable { + 
super.batchSize(batchSize) + return this + } + /** - * Sets the number of documents to return per batch. + * Sets the timeoutMode for the cursor. * - * @param batchSize the batch size + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * If the `timeout` is set then: + * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME] + * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and its an error to configure + * it as: [TimeoutMode.CURSOR_LIFETIME] + * + * @param timeoutMode the timeout mode * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + * @since 5.2 */ - public override fun batchSize(batchSize: Int): AggregateIterable = apply { wrapped.batchSize(batchSize) } + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): AggregateIterable { + wrapped.timeoutMode(timeoutMode) + return this + } /** * Aggregates documents according to the specified aggregation pipeline, which must end with a $out or $merge stage. diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt index 95660682f0b..cf7cc35b0b0 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt @@ -37,6 +37,11 @@ import org.bson.BsonValue public class ChangeStreamIterable(private val wrapped: JChangeStreamIterable) : MongoIterable>(wrapped) { + public override fun batchSize(batchSize: Int): ChangeStreamIterable { + super.batchSize(batchSize) + return this + } + /** * Returns a cursor used for iterating over elements of type {@code ChangeStreamDocument}. The cursor has a * covariant return type to additionally provide a method to access the resume token in change stream batches. @@ -77,15 +82,6 @@ public class ChangeStreamIterable(private val wrapped: JChangeStreamIte wrapped.resumeAfter(resumeToken) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public override fun batchSize(batchSize: Int): ChangeStreamIterable = apply { wrapped.batchSize(batchSize) } - /** * Sets the maximum await execution time on the server for this operation. 
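The AggregateIterable KDoc above introduces timeoutMode for the sync driver. A minimal sketch of the intended use, assuming a timeout has been configured somewhere up the chain; database, collection and field names are placeholders.

import com.mongodb.client.cursor.TimeoutMode
import com.mongodb.client.model.Aggregates
import com.mongodb.client.model.Filters
import com.mongodb.kotlin.client.MongoClient
import java.util.concurrent.TimeUnit
import org.bson.Document

fun main() {
    MongoClient.create("mongodb://localhost:27017").use { client ->
        // timeoutMode() requires a timeout to be set, here at the collection level.
        val orders = client.getDatabase("test")
            .getCollection<Document>("orders")
            .withTimeout(10, TimeUnit.SECONDS)

        // ITERATION applies the 10s budget to the initial command and to each
        // getMore separately, instead of to the whole cursor lifetime
        // (CURSOR_LIFETIME, the default for non-tailable cursors).
        val firstMatch = orders
            .aggregate(listOf(Aggregates.match(Filters.eq("status", "shipped"))))
            .timeoutMode(TimeoutMode.ITERATION)
            .firstOrNull()

        firstMatch?.let { println(it.toJson()) }
    }
}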
* diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt index de77215d033..f785eeca7e4 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt @@ -15,7 +15,10 @@ */ package com.mongodb.kotlin.client +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.DistinctIterable as JDistinctIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -28,6 +31,7 @@ import org.bson.conversions.Bson * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/) */ public class DistinctIterable(private val wrapped: JDistinctIterable) : MongoIterable(wrapped) { + /** * Sets the number of documents to return per batch. * @@ -37,6 +41,19 @@ public class DistinctIterable(private val wrapped: JDistinctIterable = apply { wrapped.batchSize(batchSize) } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): DistinctIterable = apply { wrapped.timeoutMode(timeoutMode) } + /** * Sets the query filter to apply to the query. * diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt index 2a33cb6f268..81e1bb51864 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt @@ -17,7 +17,10 @@ package com.mongodb.kotlin.client import com.mongodb.CursorType import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.FindIterable as JFindIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -31,14 +34,32 @@ import org.bson.conversions.Bson * @see [Collection filter](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/) */ public class FindIterable(private val wrapped: JFindIterable) : MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): FindIterable { + super.batchSize(batchSize) + return this + } + /** - * Sets the number of documents to return per batch. + * Sets the timeoutMode for the cursor. 
* - * @param batchSize the batch size + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * If the `timeout` is set then: + * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME] + * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and its an error to configure + * it as: [TimeoutMode.CURSOR_LIFETIME] + * + * @param timeoutMode the timeout mode * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + * @since 5.2 */ - public override fun batchSize(batchSize: Int): FindIterable = apply { wrapped.batchSize(batchSize) } + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): FindIterable { + wrapped.timeoutMode(timeoutMode) + return this + } /** * Sets the query filter to apply to the query. diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt index 6ff8bc9c3fa..43b2a9ba510 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt @@ -15,7 +15,10 @@ */ package com.mongodb.kotlin.client +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable +import com.mongodb.client.cursor.TimeoutMode import java.util.concurrent.TimeUnit import org.bson.BsonValue import org.bson.conversions.Bson @@ -28,6 +31,28 @@ import org.bson.conversions.Bson */ public class ListCollectionsIterable(private val wrapped: JListCollectionsIterable) : MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): ListCollectionsIterable { + super.batchSize(batchSize) + return this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListCollectionsIterable { + wrapped.timeoutMode(timeoutMode) + return this + } + /** * Sets the maximum execution time on the server for this operation. * @@ -40,15 +65,6 @@ public class ListCollectionsIterable(private val wrapped: JListCollecti wrapped.maxTime(maxTime, timeUnit) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public override fun batchSize(batchSize: Int): ListCollectionsIterable = apply { wrapped.batchSize(batchSize) } - /** * Sets the query filter to apply to the returned database names. 
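The FindIterable hunk above spells out the tailable-cursor constraint: with a timeout set, tailable cursors default to TimeoutMode.ITERATION and reject CURSOR_LIFETIME. A sketch under the assumption that "log" is a capped collection (which tailable cursors require); all names are placeholders.

import com.mongodb.CursorType
import com.mongodb.client.cursor.TimeoutMode
import com.mongodb.kotlin.client.MongoClient
import java.util.concurrent.TimeUnit
import org.bson.Document

fun main() {
    MongoClient.create("mongodb://localhost:27017").use { client ->
        // Assumed: "log" is a capped collection, as required for tailable cursors.
        val log = client.getDatabase("test")
            .getCollection<Document>("log")
            .withTimeout(5, TimeUnit.SECONDS)

        // ITERATION is the only legal mode for a tailable cursor: the 5s budget is
        // applied to each await/getMore rather than to the cursor's whole lifetime.
        val latest = log.find()
            .cursorType(CursorType.TailableAwait)
            .timeoutMode(TimeoutMode.ITERATION)
            .firstOrNull()

        println(latest?.toJson())
    }
}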
* diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt index 560920b5e0d..dd9e1e0bcc8 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt @@ -15,7 +15,10 @@ */ package com.mongodb.kotlin.client +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable +import com.mongodb.client.cursor.TimeoutMode import java.util.concurrent.TimeUnit import org.bson.BsonValue import org.bson.conversions.Bson @@ -28,6 +31,28 @@ import org.bson.conversions.Bson */ public class ListDatabasesIterable(private val wrapped: JListDatabasesIterable) : MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): ListDatabasesIterable { + super.batchSize(batchSize) + return this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListDatabasesIterable { + wrapped.timeoutMode(timeoutMode) + return this + } + /** * Sets the maximum execution time on the server for this operation. * @@ -40,15 +65,6 @@ public class ListDatabasesIterable(private val wrapped: JListDatabasesI wrapped.maxTime(maxTime, timeUnit) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public override fun batchSize(batchSize: Int): ListDatabasesIterable = apply { wrapped.batchSize(batchSize) } - /** * Sets the query filter to apply to the returned database names. * diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt index 36847cb49d8..cc4449384b8 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt @@ -15,7 +15,10 @@ */ package com.mongodb.kotlin.client +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.ListIndexesIterable as JListIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -26,6 +29,28 @@ import org.bson.BsonValue * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/) */ public class ListIndexesIterable(private val wrapped: JListIndexesIterable) : MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): ListIndexesIterable { + super.batchSize(batchSize) + return this + } + + /** + * Sets the timeoutMode for the cursor. 
+ * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListIndexesIterable { + wrapped.timeoutMode(timeoutMode) + return this + } + /** * Sets the maximum execution time on the server for this operation. * @@ -38,15 +63,6 @@ public class ListIndexesIterable(private val wrapped: JListIndexesItera wrapped.maxTime(maxTime, timeUnit) } - /** - * Sets the number of documents to return per batch. - * - * @param batchSize the batch size - * @return this - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) - */ - public override fun batchSize(batchSize: Int): ListIndexesIterable = apply { wrapped.batchSize(batchSize) } - /** * Sets the comment for this operation. A null value means no comment is set. * diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt index 5b370702923..aa0dc1664bd 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt @@ -16,7 +16,10 @@ package com.mongodb.kotlin.client import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import org.bson.BsonValue @@ -31,22 +34,34 @@ import org.bson.Document public class ListSearchIndexesIterable(private val wrapped: JListSearchIndexesIterable) : MongoIterable(wrapped) { + public override fun batchSize(batchSize: Int): ListSearchIndexesIterable { + super.batchSize(batchSize) + return this + } + /** - * Sets an Atlas Search index name for this operation. + * Sets the timeoutMode for the cursor. * - * @param indexName Atlas Search index name. - * @return this. + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 */ - public fun name(indexName: String): ListSearchIndexesIterable = apply { wrapped.name(indexName) } + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesIterable { + wrapped.timeoutMode(timeoutMode) + return this + } /** - * Sets the number of documents to return per batch. + * Sets an Atlas Search index name for this operation. * - * @param batchSize the batch size. + * @param indexName Atlas Search index name. * @return this. - * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) */ - public override fun batchSize(batchSize: Int): ListSearchIndexesIterable = apply { wrapped.batchSize(batchSize) } + public fun name(indexName: String): ListSearchIndexesIterable = apply { wrapped.name(indexName) } /** * Enables writing to temporary files. A null value indicates that it's unspecified. 
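As an illustration of the reordered ListSearchIndexesIterable API, a hedged sketch; it assumes the collection exposes listSearchIndexes() returning this iterable, and "default" is only a placeholder Atlas Search index name.

    import com.mongodb.client.cursor.TimeoutMode
    import com.mongodb.kotlin.client.ListSearchIndexesIterable
    import com.mongodb.kotlin.client.MongoCollection
    import org.bson.Document

    // batchSize, name and timeoutMode all return the iterable, so they chain;
    // timeoutMode only applies when timeoutMS is configured on the client or collection.
    fun searchIndexes(collection: MongoCollection<Document>): ListSearchIndexesIterable<Document> =
        collection.listSearchIndexes()
            .name("default")
            .batchSize(10)
            .timeoutMode(TimeoutMode.ITERATION)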
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt index 4cae28c973f..bdf2ba30bd5 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt @@ -23,8 +23,7 @@ import com.mongodb.client.MongoClient as JMongoClient import com.mongodb.client.MongoClients as JMongoClients import com.mongodb.connection.ClusterDescription import java.io.Closeable -import org.bson.Document -import org.bson.conversions.Bson +import java.util.concurrent.TimeUnit /** * A client-side representation of a MongoDB cluster. @@ -38,7 +37,7 @@ import org.bson.conversions.Bson * * @see MongoClient.create */ -public class MongoClient(private val wrapped: JMongoClient) : Closeable { +public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapped), Closeable { /** * A factory for [MongoClient] instances. @@ -108,175 +107,13 @@ public class MongoClient(private val wrapped: JMongoClient) : Closeable { */ public val clusterDescription: ClusterDescription get() = wrapped.clusterDescription - - /** - * Gets a [MongoDatabase] instance for the given database name. - * - * @param databaseName the name of the database to retrieve - * @return a `MongoDatabase` representing the specified database - * @throws IllegalArgumentException if databaseName is invalid - * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity - */ - public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName)) - - /** - * Creates a client session. - * - * Note: A ClientSession instance can not be used concurrently in multiple operations. - * - * @param options the options for the client session - * @return the client session - */ - public fun startSession(options: ClientSessionOptions = ClientSessionOptions.builder().build()): ClientSession = - ClientSession(wrapped.startSession(options)) - - /** - * Get a list of the database names - * - * @return an iterable containing all the names of all the databases - * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) - */ - public fun listDatabaseNames(): MongoIterable = MongoIterable(wrapped.listDatabaseNames()) - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @return the list databases iterable interface - * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) - */ - public fun listDatabaseNames(clientSession: ClientSession): MongoIterable = - MongoIterable(wrapped.listDatabaseNames(clientSession.wrapped)) - - /** - * Gets the list of databases - * - * @return the list databases iterable interface - */ - @JvmName("listDatabasesAsDocument") - public fun listDatabases(): ListDatabasesIterable = listDatabases() - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @return the list databases iterable interface - */ - @JvmName("listDatabasesAsDocumentWithSession") - public fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = - listDatabases(clientSession) - - /** - * Gets the list of databases - * - * @param T the type of the class to use - * @param resultClass the target document type of the iterable. 
- * @return the list databases iterable interface - */ - public fun listDatabases(resultClass: Class): ListDatabasesIterable = - ListDatabasesIterable(wrapped.listDatabases(resultClass)) - - /** - * Gets the list of databases - * - * @param T the type of the class to use - * @param clientSession the client session with which to associate this operation - * @param resultClass the target document type of the iterable. - * @return the list databases iterable interface - */ - public fun listDatabases(clientSession: ClientSession, resultClass: Class): ListDatabasesIterable = - ListDatabasesIterable(wrapped.listDatabases(clientSession.wrapped, resultClass)) - - /** - * Gets the list of databases - * - * @param T the type of the class to use - * @return the list databases iterable interface - */ - public inline fun listDatabases(): ListDatabasesIterable = listDatabases(T::class.java) - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @param T the type of the class to use - * @return the list databases iterable interface - */ - public inline fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = - listDatabases(clientSession, T::class.java) - - /** - * Creates a change stream for this client. - * - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - @JvmName("watchAsDocument") - public fun watch(pipeline: List = emptyList()): ChangeStreamIterable = watch(pipeline) - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - @JvmName("watchAsDocumentWithSession") - public fun watch(clientSession: ClientSession, pipeline: List = emptyList()): ChangeStreamIterable = - watch(clientSession, pipeline) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @param resultClass the target document type of the iterable. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public fun watch(pipeline: List = emptyList(), resultClass: Class): ChangeStreamIterable = - ChangeStreamIterable(wrapped.watch(pipeline, resultClass)) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @param resultClass the target document type of the iterable. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public fun watch( - clientSession: ClientSession, - pipeline: List = emptyList(), - resultClass: Class - ): ChangeStreamIterable = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass)) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. 
- * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public inline fun watch(pipeline: List = emptyList()): ChangeStreamIterable = - watch(pipeline, T::class.java) - - /** - * Creates a change stream for this client. - * - * @param T the target document type of the iterable. - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. - * @return the change stream iterable - * @see [Change Streams](https://dochub.mongodb.org/changestreams] - */ - public inline fun watch( - clientSession: ClientSession, - pipeline: List = emptyList() - ): ChangeStreamIterable = watch(clientSession, pipeline, T::class.java) } + +/** + * ClientSessionOptions.Builder.defaultTimeout extension function + * + * @param defaultTimeout time in milliseconds + * @return the options + */ +public fun ClientSessionOptions.Builder.defaultTimeout(defaultTimeout: Long): ClientSessionOptions.Builder = + this.apply { defaultTimeout(defaultTimeout, TimeUnit.MILLISECONDS) } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt new file mode 100644 index 00000000000..f541aaf1a9f --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt @@ -0,0 +1,306 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.ClientSessionOptions +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.MongoCluster as JMongoCluster +import java.util.concurrent.TimeUnit +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +/** + * The client-side representation of a MongoDB cluster operations. + * + * The originating [MongoClient] is responsible for the closing of resources. If the originator [MongoClient] is closed, + * then any operations will fail. + * + * @see MongoClient + * @since 5.2 + */ +public open class MongoCluster protected constructor(private val wrapped: JMongoCluster) { + + /** The codec registry. */ + public val codecRegistry: CodecRegistry + get() = wrapped.codecRegistry + + /** The read concern. */ + public val readConcern: ReadConcern + get() = wrapped.readConcern + + /** The read preference. */ + public val readPreference: ReadPreference + get() = wrapped.readPreference + + /** The write concern. */ + public val writeConcern: WriteConcern + get() = wrapped.writeConcern + + /** + * The time limit for the full execution of an operation. 
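The new ClientSessionOptions.Builder.defaultTimeout extension introduced above takes a value in milliseconds; a small usage sketch (the five-second value and the helper name are illustrative only).

    import com.mongodb.ClientSessionOptions
    import com.mongodb.kotlin.client.ClientSession
    import com.mongodb.kotlin.client.MongoClient
    import com.mongodb.kotlin.client.defaultTimeout

    // Starts a session whose operations default to a 5 000 ms timeout.
    fun startTimedSession(client: MongoClient): ClientSession =
        client.startSession(
            ClientSessionOptions.builder()
                .defaultTimeout(5_000)
                .build())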
+ * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + + /** + * Create a new MongoCluster instance with a different codec registry. + * + * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of + * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining + * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances + * using the factory methods in [org.bson.codecs.configuration.CodecRegistries]. + * + * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCluster = + MongoCluster(wrapped.withCodecRegistry(newCodecRegistry)) + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param newReadPreference the new [ReadPreference] for the database + * @return a new MongoCluster instance with the different readPreference + */ + public fun withReadPreference(newReadPreference: ReadPreference): MongoCluster = + MongoCluster(wrapped.withReadPreference(newReadPreference)) + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param newReadConcern the new [ReadConcern] for the database + * @return a new MongoCluster instance with the different ReadConcern + * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/) + */ + public fun withReadConcern(newReadConcern: ReadConcern): MongoCluster = + MongoCluster(wrapped.withReadConcern(newReadConcern)) + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param newWriteConcern the new [WriteConcern] for the database + * @return a new MongoCluster instance with the different writeConcern + */ + public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCluster = + MongoCluster(wrapped.withWriteConcern(newWriteConcern)) + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. 
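A short sketch of how the MongoCluster timeout accessor and the immutable with-style methods introduced here compose; the values are examples only.

    import com.mongodb.ReadPreference
    import com.mongodb.kotlin.client.MongoClient
    import java.util.concurrent.TimeUnit

    fun timeoutViews(client: MongoClient) {
        // Each with* call returns a new MongoCluster view; the original client is unchanged.
        val timed = client.withTimeout(2, TimeUnit.SECONDS)
            .withReadPreference(ReadPreference.secondaryPreferred())

        println(timed.timeout())                 // 2000 (defaults to milliseconds)
        println(timed.timeout(TimeUnit.SECONDS)) // 2
        println(client.timeout())                // null unless timeoutMS was set in the settings
    }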
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCluster instance with the set time limit for operations + * @see [MongoDatabase.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCluster = + MongoCluster(wrapped.withTimeout(timeout, timeUnit)) + + /** + * Gets a [MongoDatabase] instance for the given database name. + * + * @param databaseName the name of the database to retrieve + * @return a `MongoDatabase` representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity + */ + public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName)) + + /** + * Creates a client session. + * + * Note: A ClientSession instance can not be used concurrently in multiple operations. + * + * @param options the options for the client session + * @return the client session + */ + public fun startSession(options: ClientSessionOptions = ClientSessionOptions.builder().build()): ClientSession = + ClientSession(wrapped.startSession(options)) + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) + */ + public fun listDatabaseNames(): MongoIterable = MongoIterable(wrapped.listDatabaseNames()) + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases) + */ + public fun listDatabaseNames(clientSession: ClientSession): MongoIterable = + MongoIterable(wrapped.listDatabaseNames(clientSession.wrapped)) + + /** + * Gets the list of databases + * + * @return the list databases iterable interface + */ + @JvmName("listDatabasesAsDocument") + public fun listDatabases(): ListDatabasesIterable = listDatabases() + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + */ + @JvmName("listDatabasesAsDocumentWithSession") + public fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = + listDatabases(clientSession) + + /** + * Gets the list of databases + * + * @param T the type of the class to use + * @param resultClass the target document type of the iterable. + * @return the list databases iterable interface + */ + public fun listDatabases(resultClass: Class): ListDatabasesIterable = + ListDatabasesIterable(wrapped.listDatabases(resultClass)) + + /** + * Gets the list of databases + * + * @param T the type of the class to use + * @param clientSession the client session with which to associate this operation + * @param resultClass the target document type of the iterable. 
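Because these members now live on MongoCluster, code written against MongoCluster works with a MongoClient or with any derived view such as the result of withTimeout. A hedged sketch; the database name and helper name are illustrative.

    import com.mongodb.kotlin.client.MongoCluster
    import com.mongodb.kotlin.client.MongoIterable
    import org.bson.Document

    fun pingAndListNames(cluster: MongoCluster): MongoIterable<String> {
        // runCommand returns the server reply as a Document.
        cluster.getDatabase("admin").runCommand(Document("ping", 1))
        return cluster.listDatabaseNames()
    }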
+ * @return the list databases iterable interface + */ + public fun listDatabases(clientSession: ClientSession, resultClass: Class): ListDatabasesIterable = + ListDatabasesIterable(wrapped.listDatabases(clientSession.wrapped, resultClass)) + + /** + * Gets the list of databases + * + * @param T the type of the class to use + * @return the list databases iterable interface + */ + public inline fun listDatabases(): ListDatabasesIterable = listDatabases(T::class.java) + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param T the type of the class to use + * @return the list databases iterable interface + */ + public inline fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = + listDatabases(clientSession, T::class.java) + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + @JvmName("watchAsDocument") + public fun watch(pipeline: List = emptyList()): ChangeStreamIterable = watch(pipeline) + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + @JvmName("watchAsDocumentWithSession") + public fun watch(clientSession: ClientSession, pipeline: List = emptyList()): ChangeStreamIterable = + watch(clientSession, pipeline) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public fun watch(pipeline: List = emptyList(), resultClass: Class): ChangeStreamIterable = + ChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public fun watch( + clientSession: ClientSession, + pipeline: List = emptyList(), + resultClass: Class + ): ChangeStreamIterable = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass)) + + /** + * Creates a change stream for this client. + * + * @param T the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public inline fun watch(pipeline: List = emptyList()): ChangeStreamIterable = + watch(pipeline, T::class.java) + + /** + * Creates a change stream for this client. 
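A sketch of the client-level watch overload with an explicit pipeline and result class, using the standard Aggregates and Filters builders; the pipeline itself is illustrative.

    import com.mongodb.client.model.Aggregates
    import com.mongodb.client.model.Filters
    import com.mongodb.kotlin.client.ChangeStreamIterable
    import com.mongodb.kotlin.client.MongoCluster
    import org.bson.Document

    // Streams only insert events, decoded into Document.
    fun insertEvents(cluster: MongoCluster): ChangeStreamIterable<Document> =
        cluster.watch(
            pipeline = listOf(Aggregates.match(Filters.eq("operationType", "insert"))),
            resultClass = Document::class.java)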
+ * + * @param T the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public inline fun watch( + clientSession: ClientSession, + pipeline: List = emptyList() + ): ChangeStreamIterable = watch(clientSession, pipeline, T::class.java) +} diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt index 786140caf12..9521c502460 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt @@ -19,6 +19,8 @@ import com.mongodb.MongoNamespace import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.bulk.BulkWriteResult import com.mongodb.client.MongoCollection as JMongoCollection import com.mongodb.client.model.BulkWriteOptions @@ -84,6 +86,28 @@ public class MongoCollection(private val wrapped: JMongoCollection) public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new collection instance with a different default class to cast any documents returned from the database * into. @@ -147,6 +171,21 @@ public class MongoCollection(private val wrapped: JMongoCollection) public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection = MongoCollection(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. 
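Mirroring the cluster-level API, the collection now exposes timeout() and withTimeout (the latter completed just below); a minimal sketch, with the 200 ms figure purely as an example.

    import com.mongodb.kotlin.client.MongoCollection
    import java.util.concurrent.TimeUnit
    import org.bson.Document

    fun withShortTimeout(collection: MongoCollection<Document>): MongoCollection<Document> {
        val fast = collection.withTimeout(200, TimeUnit.MILLISECONDS)
        // timeout() reports the collection-level timeoutMS; the original collection is untouched.
        check(fast.timeout() == 200L)
        return fast
    }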
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCollection instance with the set time limit for operations + * @see [MongoCollection.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection = + MongoCollection(wrapped.withTimeout(timeout, timeUnit)) + /** * Counts the number of documents in the collection. * diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt index b407195b079..714e82fa78e 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt @@ -76,6 +76,14 @@ public sealed interface MongoCursor : Iterator, Closeable { * } * ``` * + * A [com.mongodb.MongoOperationTimeoutException] does not invalidate the [MongoChangeStreamCursor], but is immediately + * propagated to the caller. Subsequent method calls will attempt to resume operation by establishing a new change + * stream on the server, without performing a `getMore` request first. + * + * If a [com.mongodb.MongoOperationTimeoutException] occurs before any events are received, it indicates that the server + * has timed out before it could finish processing the existing oplog. In such cases, it is recommended to close the + * current stream and recreate it with a higher timeout setting. + * * @param T The type of documents the cursor contains */ public sealed interface MongoChangeStreamCursor : MongoCursor { diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt index 988db01485a..d59ba628008 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt @@ -18,6 +18,8 @@ package com.mongodb.kotlin.client import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason import com.mongodb.client.MongoDatabase as JMongoDatabase import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.CreateViewOptions @@ -53,6 +55,28 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). 
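To make the change stream cursor note above concrete, a hedged sketch of tolerating MongoOperationTimeoutException and letting the next call resume the stream; it assumes ChangeStreamIterable exposes cursor() and that the loop is stopped externally.

    import com.mongodb.MongoOperationTimeoutException
    import com.mongodb.kotlin.client.MongoCollection
    import org.bson.Document

    fun printChanges(collection: MongoCollection<Document>) {
        collection.watch().cursor().use { cursor ->
            while (true) { // stop condition omitted for brevity
                try {
                    println(cursor.next())
                } catch (e: MongoOperationTimeoutException) {
                    // The cursor stays valid; the next call re-establishes the change stream
                    // without issuing a getMore first. If this fires before any event arrives,
                    // consider recreating the stream with a larger timeout.
                }
            }
        }
    }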
+ * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -96,6 +120,21 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase = MongoDatabase(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoDatabase instance with the set time limit for operations + * @see [MongoDatabase.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase = + MongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + /** * Gets a collection. * @@ -120,6 +159,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param command the command to be run * @param readPreference the [ReadPreference] to be used when executing the command, defaults to * [MongoDatabase.readPreference] @@ -131,6 +173,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the [ReadPreference] to be used when executing the command, defaults to @@ -146,6 +191,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param T the class to decode each document into * @param command the command to be run * @param readPreference the [ReadPreference] to be used when executing the command, defaults to @@ -162,6 +210,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. 
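A small sketch of the recommendation implied by the runCommand note above: when timeoutMS is in effect, let the driver derive the server-side limit rather than adding maxTimeMS to the command document. The helper name and the three-second value are illustrative.

    import com.mongodb.kotlin.client.MongoDatabase
    import java.util.concurrent.TimeUnit
    import org.bson.Document

    fun pingWithDeadline(database: MongoDatabase): Document =
        // Do not add a maxTimeMS field here; combining it with timeoutMS is undefined behavior.
        database.withTimeout(3, TimeUnit.SECONDS)
            .runCommand(Document("ping", 1))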
+ * * @param T the class to decode each document into * @param clientSession the client session with which to associate this operation * @param command the command to be run @@ -180,6 +231,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param T the class to decode each document into * @param command the command to be run * @param readPreference the [ReadPreference] to be used when executing the command, defaults to @@ -194,6 +248,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { /** * Executes the given command in the context of the current database with the given read preference. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * * @param T the class to decode each document into * @param clientSession the client session with which to associate this operation * @param command the command to be run diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt index ce1ed2dea47..89cc8db421e 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt @@ -17,6 +17,7 @@ package com.mongodb.kotlin.client import com.mongodb.ExplainVerbosity import com.mongodb.client.AggregateIterable as JAggregateIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions @@ -79,6 +80,7 @@ class AggregateIterableTest { iterable.maxAwaitTime(1, TimeUnit.SECONDS) iterable.maxTime(1) iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).allowDiskUse(true) verify(wrapped).batchSize(batchSize) @@ -96,6 +98,7 @@ class AggregateIterableTest { verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) verify(wrapped).let(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) iterable.toCollection() verify(wrapped).toCollection() diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt index 63309969104..c3c4772f9d6 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt @@ -45,6 +45,7 @@ class ClientSessionTest { "getServerSession", "getSnapshotTimestamp", "getTransactionContext", + "getTimeoutContext", "notifyMessageSent", "notifyOperationInitiated", "setRecoveryToken", diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt index c9fc79e8128..91f5e9b6f44 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt @@ -16,6 +16,7 @@ package 
com.mongodb.kotlin.client import com.mongodb.client.DistinctIterable as JDistinctIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions @@ -31,7 +32,8 @@ import org.mockito.kotlin.verifyNoMoreInteractions class DistinctIterableTest { @Test fun shouldHaveTheSameMethods() { - val jDistinctIterableFunctions = JDistinctIterable::class.declaredFunctions.map { it.name }.toSet() + val jDistinctIterableFunctions = + JDistinctIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" val kDistinctIterableFunctions = DistinctIterable::class.declaredFunctions.map { it.name }.toSet() assertEquals(jDistinctIterableFunctions, kDistinctIterableFunctions) @@ -55,6 +57,7 @@ class DistinctIterableTest { iterable.filter(filter) iterable.maxTime(1) iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).collation(collation) @@ -63,6 +66,7 @@ class DistinctIterableTest { verify(wrapped).filter(filter) verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt index f0e7698124b..29374ff5c6b 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt @@ -29,6 +29,7 @@ class ExtensionMethodsTest { "CountOptions", "CreateCollectionOptions", "CreateIndexOptions", + "ClientSessionOptions", "DropIndexOptions", "EstimatedDocumentCountOptions", "FindOneAndDeleteOptions", diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt index 9d8d28104d1..0f4b2725b2e 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt @@ -18,6 +18,7 @@ package com.mongodb.kotlin.client import com.mongodb.CursorType import com.mongodb.ExplainVerbosity import com.mongodb.client.FindIterable as JFindIterable +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions @@ -31,7 +32,7 @@ import org.mockito.kotlin.* class FindIterableTest { @Test fun shouldHaveTheSameMethods() { - val jFindIterableFunctions = JFindIterable::class.declaredFunctions.map { it.name }.toSet() + val jFindIterableFunctions = JFindIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" val kFindIterableFunctions = FindIterable::class.declaredFunctions.map { it.name }.toSet() assertEquals(jFindIterableFunctions, kFindIterableFunctions) @@ -86,6 +87,7 @@ class FindIterableTest { iterable.showRecordId(true) iterable.skip(1) iterable.sort(bson) + iterable.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).allowDiskUse(true) verify(wrapped).batchSize(batchSize) @@ -114,6 +116,7 @@ class FindIterableTest { verify(wrapped).showRecordId(true) verify(wrapped).skip(1) verify(wrapped).sort(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) 
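For reference, the delegation pattern these unit test changes follow, written out as a standalone sketch; it assumes the Kotlin wrapper's primary constructor is accessible from the test (as for ListCollectionsIterable above) and uses mockito-kotlin, as the existing tests do.

    import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
    import com.mongodb.client.cursor.TimeoutMode
    import com.mongodb.kotlin.client.ListCollectionsIterable
    import kotlin.test.Test
    import org.bson.Document
    import org.mockito.kotlin.mock
    import org.mockito.kotlin.verify
    import org.mockito.kotlin.verifyNoMoreInteractions

    class TimeoutModeDelegationTest {
        @Test
        fun forwardsTimeoutModeToTheWrappedIterable() {
            val wrapped: JListCollectionsIterable<Document> = mock()
            val iterable = ListCollectionsIterable(wrapped)

            iterable.timeoutMode(TimeoutMode.ITERATION)

            // The Kotlin API is a thin delegate: the call must reach the wrapped Java iterable unchanged.
            verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
            verifyNoMoreInteractions(wrapped)
        }
    }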
verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt index b0c23b331e4..26dd071768c 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable +import com.mongodb.client.cursor.TimeoutMode import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions import kotlin.test.assertEquals @@ -53,6 +54,7 @@ class ListCollectionsIterableTest { iterable.filter(filter) iterable.maxTime(1) iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).comment(bsonComment) @@ -60,6 +62,7 @@ class ListCollectionsIterableTest { verify(wrapped).filter(filter) verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt index c10ef133c1d..a1c95cad1a0 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt @@ -16,6 +16,7 @@ package com.mongodb.kotlin.client import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable +import com.mongodb.client.cursor.TimeoutMode import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions import kotlin.test.assertEquals @@ -30,7 +31,8 @@ import org.mockito.kotlin.verifyNoMoreInteractions class ListDatabasesIterableTest { @Test fun shouldHaveTheSameMethods() { - val jListDatabasesIterableFunctions = JListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet() + val jListDatabasesIterableFunctions = + JListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" val kListDatabasesIterableFunctions = ListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet() assertEquals(jListDatabasesIterableFunctions, kListDatabasesIterableFunctions) @@ -54,6 +56,7 @@ class ListDatabasesIterableTest { iterable.maxTime(1) iterable.maxTime(1, TimeUnit.SECONDS) iterable.nameOnly(true) + iterable.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).authorizedDatabasesOnly(true) verify(wrapped).batchSize(batchSize) @@ -63,6 +66,7 @@ class ListDatabasesIterableTest { verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) verify(wrapped).nameOnly(true) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt index 70c799eeee4..08bd5b4e685 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt @@ -16,6 +16,7 @@ package 
com.mongodb.kotlin.client import com.mongodb.client.ListIndexesIterable as JListIndexesIterable +import com.mongodb.client.cursor.TimeoutMode import java.util.concurrent.TimeUnit import kotlin.reflect.full.declaredFunctions import kotlin.test.assertEquals @@ -29,7 +30,8 @@ import org.mockito.kotlin.verifyNoMoreInteractions class ListIndexesIterableTest { @Test fun shouldHaveTheSameMethods() { - val jListIndexesIterableFunctions = JListIndexesIterable::class.declaredFunctions.map { it.name }.toSet() + val jListIndexesIterableFunctions = + JListIndexesIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" val kListIndexesIterableFunctions = ListIndexesIterable::class.declaredFunctions.map { it.name }.toSet() assertEquals(jListIndexesIterableFunctions, kListIndexesIterableFunctions) @@ -49,12 +51,14 @@ class ListIndexesIterableTest { iterable.comment(comment) iterable.maxTime(1) iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) verify(wrapped).batchSize(batchSize) verify(wrapped).comment(bsonComment) verify(wrapped).comment(comment) verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapped) } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt index d458c9302ce..e27b7852bba 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt @@ -71,7 +71,16 @@ class MongoCollectionTest { fun shouldHaveTheSameMethods() { val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() - "mapReduce" val kMongoCollectionFunctions = - MongoCollection::class.declaredFunctions.map { it.name }.toSet() + + MongoCollection::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoCollection::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt index 6a7264545dc..1a7bc1d25c2 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt @@ -52,7 +52,16 @@ class MongoDatabaseTest { fun shouldHaveTheSameMethods() { val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet() val kMongoDatabaseFunctions = - MongoDatabase::class.declaredFunctions.map { it.name }.toSet() + + MongoDatabase::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoDatabase::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-legacy/src/main/com/mongodb/DB.java b/driver-legacy/src/main/com/mongodb/DB.java index df3a7b41076..7b47cfb8515 100644 --- a/driver-legacy/src/main/com/mongodb/DB.java +++ b/driver-legacy/src/main/com/mongodb/DB.java @@ -23,6 +23,7 @@ import com.mongodb.client.model.DBCreateViewOptions; import com.mongodb.client.model.ValidationAction; import com.mongodb.client.model.ValidationLevel; +import com.mongodb.internal.TimeoutSettings; import 
com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.CommandReadOperation; import com.mongodb.internal.operation.CreateCollectionOperation; @@ -220,11 +221,15 @@ public String getName() { public Set getCollectionNames() { List collectionNames = new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, primary(), - mongo.getMongoClientOptions().getRetryReads()) { + mongo.getMongoClientOptions().getRetryReads(), DB.this.getTimeoutSettings()) { @Override public ReadOperation> asReadOperation() { - return new ListCollectionsOperation<>(name, commandCodec) - .nameOnly(true); + return new ListCollectionsOperation<>(name, commandCodec).nameOnly(true); + } + + @Override + protected OperationExecutor getExecutor() { + return executor; } }.map(result -> (String) result.get("name")).into(new ArrayList<>()); Collections.sort(collectionNames); @@ -304,8 +309,9 @@ public DBCollection createView(final String viewName, final String viewOn, final try { notNull("options", options); DBCollection view = getCollection(viewName); - executor.execute(new CreateViewOperation(name, viewName, viewOn, view.preparePipeline(pipeline), writeConcern) - .collation(options.getCollation()), getReadConcern()); + executor.execute(new CreateViewOperation(name, viewName, viewOn, + view.preparePipeline(pipeline), writeConcern) + .collation(options.getCollation()), getReadConcern()); return view; } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -380,7 +386,8 @@ private CreateCollectionOperation getCreateCollectionOperation(final String coll validationAction = ValidationAction.fromString((String) options.get("validationAction")); } Collation collation = DBObjectCollationHelper.createCollationFromOptions(options); - return new CreateCollectionOperation(getName(), collectionName, getWriteConcern()) + return new CreateCollectionOperation(getName(), collectionName, + getWriteConcern()) .capped(capped) .collation(collation) .sizeInBytes(sizeInBytes) @@ -513,13 +520,17 @@ public String toString() { } CommandResult executeCommand(final BsonDocument commandDocument, final ReadPreference readPreference) { - return new CommandResult(executor.execute(new CommandReadOperation<>(getName(), commandDocument, - new BsonDocumentCodec()), readPreference, getReadConcern()), getDefaultDBObjectCodec()); + return new CommandResult(executor.execute( + new CommandReadOperation<>(getName(), commandDocument, + new BsonDocumentCodec()), readPreference, getReadConcern(), null), getDefaultDBObjectCodec()); } OperationExecutor getExecutor() { return executor; } + TimeoutSettings getTimeoutSettings() { + return mongo.getTimeoutSettings(); + } private BsonDocument wrap(final DBObject document) { return new BsonDocumentWrapper<>(document, commandCodec); @@ -561,6 +572,11 @@ Codec getDefaultDBObjectCodec() { .withUuidRepresentation(getMongoClient().getMongoClientOptions().getUuidRepresentation()); } + @Nullable + Long getTimeoutMS() { + return mongo.getMongoClientOptions().getTimeout(); + } + private static final Set OBEDIENT_COMMANDS = new HashSet<>(); static { diff --git a/driver-legacy/src/main/com/mongodb/DBCollection.java b/driver-legacy/src/main/com/mongodb/DBCollection.java index e71fd8c3aa4..54eb354a877 100644 --- a/driver-legacy/src/main/com/mongodb/DBCollection.java +++ b/driver-legacy/src/main/com/mongodb/DBCollection.java @@ -26,6 +26,7 @@ import com.mongodb.client.model.DBCollectionFindOptions; import com.mongodb.client.model.DBCollectionRemoveOptions; import 
com.mongodb.client.model.DBCollectionUpdateOptions; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.IndexRequest; import com.mongodb.internal.bulk.InsertRequest; @@ -84,6 +85,7 @@ import static com.mongodb.MongoNamespace.checkCollectionNameValidity; import static com.mongodb.ReadPreference.primary; import static com.mongodb.ReadPreference.primaryPreferred; +import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.Locks.withLock; import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; @@ -345,8 +347,8 @@ private Encoder toEncoder(@Nullable final DBEncoder dbEncoder) { private WriteResult insert(final List insertRequestList, final WriteConcern writeConcern, final boolean continueOnError, @Nullable final Boolean bypassDocumentValidation) { - return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(), !continueOnError, writeConcern, - retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation)); + return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(), + !continueOnError, writeConcern, retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation)); } WriteResult executeWriteOperation(final LegacyMixedBulkWriteOperation operation) { @@ -429,8 +431,8 @@ private WriteResult replaceOrInsert(final DBObject obj, final Object id, final W UpdateRequest replaceRequest = new UpdateRequest(wrap(filter), wrap(obj, objectCodec), Type.REPLACE).upsert(true); - return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false, writeConcern, retryWrites, - singletonList(replaceRequest))); + return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false, + writeConcern, retryWrites, singletonList(replaceRequest))); } /** @@ -582,8 +584,10 @@ public WriteResult update(final DBObject query, final DBObject update, final DBC .collation(options.getCollation()) .arrayFilters(wrapAllowNull(options.getArrayFilters(), options.getEncoder())); LegacyMixedBulkWriteOperation operation = (updateType == UPDATE - ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest)) - : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest))) + ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites, + singletonList(updateRequest)) + : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites, + singletonList(updateRequest))) .bypassDocumentValidation(options.getBypassDocumentValidation()); return executeWriteOperation(operation); } @@ -655,8 +659,8 @@ public WriteResult remove(final DBObject query, final DBCollectionRemoveOptions WriteConcern optionsWriteConcern = options.getWriteConcern(); WriteConcern writeConcern = optionsWriteConcern != null ? 
optionsWriteConcern : getWriteConcern(); DeleteRequest deleteRequest = new DeleteRequest(wrap(query, options.getEncoder())).collation(options.getCollation()); - return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false, writeConcern, retryWrites, - singletonList(deleteRequest))); + return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false, + writeConcern, retryWrites, singletonList(deleteRequest))); } /** @@ -913,12 +917,12 @@ public long getCount(@Nullable final DBObject query) { */ public long getCount(@Nullable final DBObject query, final DBCollectionCountOptions options) { notNull("countOptions", options); - CountOperation operation = new CountOperation(getNamespace()) - .skip(options.getSkip()) - .limit(options.getLimit()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .collation(options.getCollation()) - .retryReads(retryReads); + CountOperation operation = new CountOperation( + getNamespace()) + .skip(options.getSkip()) + .limit(options.getLimit()) + .collation(options.getCollation()) + .retryReads(retryReads); if (query != null) { operation.filter(wrap(query)); } @@ -933,8 +937,9 @@ public long getCount(@Nullable final DBObject query, final DBCollectionCountOpti } ReadPreference optionsReadPreference = options.getReadPreference(); ReadConcern optionsReadConcern = options.getReadConcern(); - return executor.execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(), - optionsReadConcern != null ? optionsReadConcern : getReadConcern()); + return getExecutor(createTimeoutSettings(getTimeoutSettings(), options)) + .execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(), + optionsReadConcern != null ? optionsReadConcern : getReadConcern(), null); } /** @@ -961,8 +966,8 @@ public DBCollection rename(final String newName) { public DBCollection rename(final String newName, final boolean dropTarget) { try { executor.execute(new RenameCollectionOperation(getNamespace(), - new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern()) - .dropTarget(dropTarget), getReadConcern()); + new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern()) + .dropTarget(dropTarget), getReadConcern()); return getDB().getCollection(newName); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -1029,9 +1034,9 @@ public List distinct(final String fieldName, final DBObject query, final ReadPre public List distinct(final String fieldName, final DBCollectionDistinctOptions options) { notNull("fieldName", fieldName); return new MongoIterableImpl(null, executor, - options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(), - options.getReadPreference() != null ? options.getReadPreference() : getReadPreference(), - retryReads) { + options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(), + options.getReadPreference() != null ? 
options.getReadPreference() : getReadPreference(), + retryReads, DBCollection.this.getTimeoutSettings()) { @Override public ReadOperation> asReadOperation() { return new DistinctOperation<>(getNamespace(), fieldName, new BsonValueCodec()) @@ -1039,6 +1044,12 @@ public ReadOperation> asReadOperation() { .collation(options.getCollation()) .retryReads(retryReads); } + + @Override + protected OperationExecutor getExecutor() { + return executor; + } + }.map(bsonValue -> { if (bsonValue == null) { return null; @@ -1116,16 +1127,15 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { Boolean jsMode = command.getJsMode(); if (command.getOutputType() == MapReduceCommand.OutputType.INLINE) { - MapReduceWithInlineResultsOperation operation = - new MapReduceWithInlineResultsOperation<>(getNamespace(), new BsonJavaScript(command.getMap()), - new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec()) - .filter(wrapAllowNull(command.getQuery())) - .limit(command.getLimit()) - .maxTime(command.getMaxTime(MILLISECONDS), MILLISECONDS) - .jsMode(jsMode != null && jsMode) - .sort(wrapAllowNull(command.getSort())) - .verbose(command.isVerbose()) - .collation(command.getCollation()); + MapReduceWithInlineResultsOperation operation = new MapReduceWithInlineResultsOperation<>( + getNamespace(), new BsonJavaScript(command.getMap()), + new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec()) + .filter(wrapAllowNull(command.getQuery())) + .limit(command.getLimit()) + .jsMode(jsMode != null && jsMode) + .sort(wrapAllowNull(command.getSort())) + .verbose(command.isVerbose()) + .collation(command.getCollation()); if (scope != null) { operation.scope(wrap(new BasicDBObject(scope))); @@ -1133,7 +1143,9 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { if (command.getFinalize() != null) { operation.finalizeFunction(new BsonJavaScript(command.getFinalize())); } - MapReduceBatchCursor executionResult = executor.execute(operation, readPreference, getReadConcern()); + MapReduceBatchCursor executionResult = + getExecutor(createTimeoutSettings(getTimeoutSettings(), command)) + .execute(operation, readPreference, getReadConcern(), null); return new MapReduceOutput(command.toDBObject(), executionResult); } else { String action; @@ -1152,14 +1164,11 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { } MapReduceToCollectionOperation operation = - new MapReduceToCollectionOperation(getNamespace(), - new BsonJavaScript(command.getMap()), - new BsonJavaScript(command.getReduce()), - command.getOutputTarget(), - getWriteConcern()) + new MapReduceToCollectionOperation( + getNamespace(), new BsonJavaScript(command.getMap()), new BsonJavaScript(command.getReduce()), + command.getOutputTarget(), getWriteConcern()) .filter(wrapAllowNull(command.getQuery())) .limit(command.getLimit()) - .maxTime(command.getMaxTime(MILLISECONDS), MILLISECONDS) .jsMode(jsMode != null && jsMode) .sort(wrapAllowNull(command.getSort())) .verbose(command.isVerbose()) @@ -1225,27 +1234,31 @@ public Cursor aggregate(final List pipeline, final Aggregati BsonValue outCollection = stages.get(stages.size() - 1).get("$out"); if (outCollection != null) { - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), stages, - getReadConcern(), getWriteConcern()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .allowDiskUse(options.getAllowDiskUse()) - .bypassDocumentValidation(options.getBypassDocumentValidation()) - .collation(options.getCollation()); 
+ AggregateToCollectionOperation operation = + new AggregateToCollectionOperation( + getNamespace(), stages, getReadConcern(), getWriteConcern()) + .allowDiskUse(options.getAllowDiskUse()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()); try { - executor.execute(operation, getReadPreference(), getReadConcern()); + getExecutor(createTimeoutSettings(getTimeoutSettings(), options)) + .execute(operation, getReadPreference(), getReadConcern(), null); result = new DBCursor(database.getCollection(outCollection.asString().getValue()), new BasicDBObject(), new DBCollectionFindOptions().readPreference(primary()).collation(options.getCollation())); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } } else { - AggregateOperation operation = new AggregateOperation<>(getNamespace(), stages, getDefaultDBObjectCodec()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) + AggregateOperation operation = new AggregateOperation<>( + getNamespace(), stages, + getDefaultDBObjectCodec()) .allowDiskUse(options.getAllowDiskUse()) .batchSize(options.getBatchSize()) .collation(options.getCollation()) .retryReads(retryReads); - BatchCursor cursor1 = executor.execute(operation, readPreference, getReadConcern()); + BatchCursor cursor1 = + getExecutor(createTimeoutSettings(getTimeoutSettings(), options)) + .execute(operation, readPreference, getReadConcern(), null); result = new MongoCursorAdapter(new MongoBatchCursorAdapter<>(cursor1)); } return result; @@ -1262,14 +1275,14 @@ public Cursor aggregate(final List pipeline, final Aggregati * @mongodb.server.release 3.6 */ public CommandResult explainAggregate(final List pipeline, final AggregationOptions options) { - AggregateOperation operation = new AggregateOperation<>(getNamespace(), preparePipeline(pipeline), - new BsonDocumentCodec()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .allowDiskUse(options.getAllowDiskUse()) - .collation(options.getCollation()) - .retryReads(retryReads); - return new CommandResult(executor.execute(operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()), - primaryPreferred(), getReadConcern()), getDefaultDBObjectCodec()); + AggregateOperation operation = new AggregateOperation<>( + getNamespace(), + preparePipeline(pipeline), new BsonDocumentCodec()) + .allowDiskUse(options.getAllowDiskUse()) + .collation(options.getCollation()) + .retryReads(retryReads); + return new CommandResult(executor.execute( + operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()), primaryPreferred(), getReadConcern(), null), getDefaultDBObjectCodec()); } List preparePipeline(final List pipeline) { @@ -1657,7 +1670,6 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod .filter(wrapAllowNull(query)) .projection(wrapAllowNull(options.getProjection())) .sort(wrapAllowNull(options.getSort())) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .collation(options.getCollation()); } else { DBObject update = options.getUpdate(); @@ -1665,33 +1677,31 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod throw new IllegalArgumentException("update can not be null unless it's a remove"); } if (!update.keySet().isEmpty() && update.keySet().iterator().next().charAt(0) == '$') { - operation = new FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec, - wrap(update)) + operation = new 
FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites, + objectCodec, wrap(update)) .filter(wrap(query)) .projection(wrapAllowNull(options.getProjection())) .sort(wrapAllowNull(options.getSort())) .returnOriginal(!options.returnNew()) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .arrayFilters(wrapAllowNull(options.getArrayFilters(), (Encoder) null)); } else { - operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec, - wrap(update)) + operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites, + objectCodec, wrap(update)) .filter(wrap(query)) .projection(wrapAllowNull(options.getProjection())) .sort(wrapAllowNull(options.getSort())) .returnOriginal(!options.returnNew()) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()); } } try { - return executor.execute(operation, getReadConcern()); + return getExecutor(createTimeoutSettings(getTimeoutSettings(), options)).execute(operation, getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -1791,7 +1801,8 @@ public ReadConcern getReadConcern() { */ public void drop() { try { - executor.execute(new DropCollectionOperation(getNamespace(), getWriteConcern()), getReadConcern()); + executor.execute(new DropCollectionOperation(getNamespace(), + getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -1859,10 +1870,17 @@ public void setDBEncoderFactory(@Nullable final DBEncoderFactory factory) { * @mongodb.driver.manual core/indexes/ Indexes */ public List getIndexInfo() { - return new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, primary(), retryReads) { + return new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, primary(), retryReads, + DBCollection.this.getTimeoutSettings()) { @Override public ReadOperation> asReadOperation() { - return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec()).retryReads(retryReads); + return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec()) + .retryReads(retryReads); + } + + @Override + public OperationExecutor getExecutor() { + return executor; } }.into(new ArrayList<>()); } @@ -1877,7 +1895,8 @@ public ReadOperation> asReadOperation() { */ public void dropIndex(final DBObject index) { try { - executor.execute(new DropIndexOperation(getNamespace(), wrap(index), getWriteConcern()), getReadConcern()); + executor.execute(new DropIndexOperation(getNamespace(), wrap(index), + getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -1892,7 +1911,8 @@ public void dropIndex(final DBObject index) { */ public void dropIndex(final String indexName) { try { - executor.execute(new DropIndexOperation(getNamespace(), indexName, getWriteConcern()), getReadConcern()); + executor.execute(new DropIndexOperation(getNamespace(), indexName, + getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -2006,9 +2026,9 @@ BulkWriteResult executeBulkWriteOperation(final boolean ordered, final Boolean b final List writeRequests, final WriteConcern writeConcern) { try { - return 
translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation(getNamespace(), - translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false) - .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec()); + return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation( + getNamespace(), translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false) + .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec()); } catch (MongoBulkWriteException e) { throw BulkWriteHelper.translateBulkWriteException(e, MongoClient.getDefaultCodecRegistry().get(DBObject.class)); } @@ -2180,6 +2200,10 @@ BsonDocument wrap(final DBObject document, @Nullable final Encoder enc } } + TimeoutSettings getTimeoutSettings(){ + return database.getTimeoutSettings(); + } + static WriteConcernException createWriteConcernException(final MongoWriteConcernException e) { return new WriteConcernException(new BsonDocument("code", new BsonInt32(e.getWriteConcernError().getCode())) .append("errmsg", new BsonString(e.getWriteConcernError().getMessage())), @@ -2187,4 +2211,8 @@ static WriteConcernException createWriteConcernException(final MongoWriteConcern e.getWriteResult()); } + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + } diff --git a/driver-legacy/src/main/com/mongodb/DBCursor.java b/driver-legacy/src/main/com/mongodb/DBCursor.java index 739901b7c57..9b91bad5984 100644 --- a/driver-legacy/src/main/com/mongodb/DBCursor.java +++ b/driver-legacy/src/main/com/mongodb/DBCursor.java @@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit; import static com.mongodb.MongoClient.getDefaultCodecRegistry; +import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -370,9 +371,9 @@ public DBCursor maxTime(final long maxTime, final TimeUnit timeUnit) { * @mongodb.server.release 3.0 */ public DBObject explain() { - return executor.execute(getQueryOperation(collection.getObjectCodec()) - .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)), - getReadPreference(), getReadConcern()); + return executor.execute( + getQueryOperation(collection.getObjectCodec()) + .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)), getReadPreference(), getReadConcern(), null); } /** @@ -413,31 +414,29 @@ public DBCursor partial(final boolean partial) { } private FindOperation getQueryOperation(final Decoder decoder) { - - return new FindOperation<>(collection.getNamespace(), decoder) - .filter(collection.wrapAllowNull(filter)) - .batchSize(findOptions.getBatchSize()) - .skip(findOptions.getSkip()) - .limit(findOptions.getLimit()) - .maxAwaitTime(findOptions.getMaxAwaitTime(MILLISECONDS), MILLISECONDS) - .maxTime(findOptions.getMaxTime(MILLISECONDS), MILLISECONDS) - .projection(collection.wrapAllowNull(findOptions.getProjection())) - .sort(collection.wrapAllowNull(findOptions.getSort())) - .collation(findOptions.getCollation()) - .comment(findOptions.getComment() != null - ? new BsonString(findOptions.getComment()) : null) - .hint(findOptions.getHint() != null - ? collection.wrapAllowNull(findOptions.getHint()) - : (findOptions.getHintString() != null - ? 
new BsonString(findOptions.getHintString()) : null)) - .min(collection.wrapAllowNull(findOptions.getMin())) - .max(collection.wrapAllowNull(findOptions.getMax())) - .cursorType(findOptions.getCursorType()) - .noCursorTimeout(findOptions.isNoCursorTimeout()) - .partial(findOptions.isPartial()) - .returnKey(findOptions.isReturnKey()) - .showRecordId(findOptions.isShowRecordId()) - .retryReads(retryReads); + return new FindOperation<>( + collection.getNamespace(), decoder) + .filter(collection.wrapAllowNull(filter)) + .batchSize(findOptions.getBatchSize()) + .skip(findOptions.getSkip()) + .limit(findOptions.getLimit()) + .projection(collection.wrapAllowNull(findOptions.getProjection())) + .sort(collection.wrapAllowNull(findOptions.getSort())) + .collation(findOptions.getCollation()) + .comment(findOptions.getComment() != null + ? new BsonString(findOptions.getComment()) : null) + .hint(findOptions.getHint() != null + ? collection.wrapAllowNull(findOptions.getHint()) + : (findOptions.getHintString() != null + ? new BsonString(findOptions.getHintString()) : null)) + .min(collection.wrapAllowNull(findOptions.getMin())) + .max(collection.wrapAllowNull(findOptions.getMax())) + .cursorType(findOptions.getCursorType()) + .noCursorTimeout(findOptions.isNoCursorTimeout()) + .partial(findOptions.isPartial()) + .returnKey(findOptions.isReturnKey()) + .showRecordId(findOptions.isShowRecordId()) + .retryReads(retryReads); } /** @@ -787,7 +786,10 @@ public String toString() { } private void initializeCursor(final FindOperation operation) { - cursor = new MongoBatchCursorAdapter<>(executor.execute(operation, getReadPreference(), getReadConcern())); + cursor = + new MongoBatchCursorAdapter<>(executor + .withTimeoutSettings(createTimeoutSettings(collection.getTimeoutSettings(), findOptions)) + .execute(operation, getReadPreference(), getReadConcern(), null)); ServerCursor serverCursor = cursor.getServerCursor(); if (isCursorFinalizerEnabled() && serverCursor != null) { optionalCleaner = DBCursorCleaner.create(collection.getDB().getMongoClient(), collection.getNamespace(), diff --git a/driver-legacy/src/main/com/mongodb/MongoClient.java b/driver-legacy/src/main/com/mongodb/MongoClient.java index 94432049351..1e3f0a00c2b 100644 --- a/driver-legacy/src/main/com/mongodb/MongoClient.java +++ b/driver-legacy/src/main/com/mongodb/MongoClient.java @@ -28,11 +28,15 @@ import com.mongodb.connection.ClusterSettings; import com.mongodb.event.ClusterListener; import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.binding.ReadWriteBinding; import com.mongodb.internal.binding.SingleServerBinding; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.NoOpSessionContext; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.session.ServerSessionPool; @@ -824,6 +828,10 @@ MongoClientImpl getDelegate() { return delegate; } + TimeoutSettings getTimeoutSettings() { + return delegate.getTimeoutSettings(); + } + private ExecutorService createCursorCleaningService() { ScheduledExecutorService newTimer = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("CleanCursors")); newTimer.scheduleAtFixedRate(this::cleanCursors, 1, 
1, SECONDS); @@ -834,7 +842,8 @@ private void cleanCursors() { ServerCursorAndNamespace cur; while ((cur = orphanedCursors.poll()) != null) { ReadWriteBinding binding = new SingleServerBinding(delegate.getCluster(), cur.serverCursor.getAddress(), - options.getServerApi(), IgnorableRequestContext.INSTANCE); + new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(getTimeoutSettings()), options.getServerApi())); try { ConnectionSource source = binding.getReadConnectionSource(); try { @@ -843,7 +852,7 @@ private void cleanCursors() { BsonDocument killCursorsCommand = new BsonDocument("killCursors", new BsonString(cur.namespace.getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(cur.serverCursor.getId())))); connection.command(cur.namespace.getDatabaseName(), killCursorsCommand, new NoOpFieldNameValidator(), - ReadPreference.primary(), new BsonDocumentCodec(), source); + ReadPreference.primary(), new BsonDocumentCodec(), source.getOperationContext()); } finally { connection.release(); } diff --git a/driver-legacy/src/main/com/mongodb/MongoClientOptions.java b/driver-legacy/src/main/com/mongodb/MongoClientOptions.java index d5fe68e2f70..1f19fba3484 100644 --- a/driver-legacy/src/main/com/mongodb/MongoClientOptions.java +++ b/driver-legacy/src/main/com/mongodb/MongoClientOptions.java @@ -16,8 +16,10 @@ package com.mongodb; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ConnectionPoolSettings; import com.mongodb.event.ClusterListener; @@ -550,6 +552,38 @@ public ServerApi getServerApi() { return wrapped.getServerApi(); } + /** + * The time limit for the full execution of an operation in Milliseconds. + * + *

+     * <p>If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @return the timeout in milliseconds
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getTimeout() {
+        return wrapped.getTimeout(MILLISECONDS);
+    }
+
     /**
      * Gets the server selector.
      *
@@ -1316,6 +1350,37 @@ public Builder srvServiceName(final String srvServiceName) {
             return this;
         }
 
+        /**
+         * Sets the time limit, in milliseconds, for the full execution of an operation.
+         *
+         * <ul>
+         *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *    <ul>
+         *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+         *        available</li>
+         *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *        See: cursor.maxTimeMS.</li>
+         *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+         *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+         *    </ul>
+         *   </li>
+         *   <li>{@code 0} means infinite timeout.</li>
+         *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
+         *
+         * @param timeoutMS the timeout in milliseconds
+         * @return this
+         * @since 5.2
+         * @see #getTimeout
+         */
+        @Alpha(Reason.CLIENT)
+        public Builder timeout(final long timeoutMS) {
+            wrapped.timeout(timeoutMS, MILLISECONDS);
+            return this;
+        }
+
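A minimal usage sketch of the new option as exposed through the legacy API, assuming only the builder method and the {@code timeoutMS} connection-string support added above; the host, database and collection names are placeholders:

    import com.mongodb.MongoClient;
    import com.mongodb.MongoClientOptions;
    import com.mongodb.MongoClientURI;

    public final class TimeoutOptionExample {
        public static void main(final String[] args) {
            // One client-wide limit for the full execution of each operation, instead of the
            // deprecated socketTimeoutMS/waitQueueTimeoutMS/wTimeoutMS/maxTimeMS knobs.
            MongoClientOptions options = MongoClientOptions.builder()
                    .timeout(10_000) // milliseconds; 0 means infinite
                    .build();
            try (MongoClient client = new MongoClient("localhost", options)) {
                System.out.println(client.getDB("test").getCollection("coll").count());
            }

            // The same limit expressed in the connection string.
            MongoClientURI uri = new MongoClientURI("mongodb://localhost/?timeoutMS=10000");
            try (MongoClient client = new MongoClient(uri)) {
                System.out.println(client.getMongoClientOptions().getTimeout()); // 10000
            }
        }
    }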
     /**
      * Build an instance of MongoClientOptions.
      *
diff --git a/driver-legacy/src/main/com/mongodb/MongoClientURI.java
index 43cdccc4f4b..e471bbf1686 100644
--- a/driver-legacy/src/main/com/mongodb/MongoClientURI.java
+++ b/driver-legacy/src/main/com/mongodb/MongoClientURI.java
@@ -99,7 +99,8 @@
 * sslInvalidHostNameAllowed option</li>
 * <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
 * <li>{@code socketTimeoutMS=ms}: How long a receive on a socket can take before timing out.
- * This option is the same as {@link MongoClientOptions#getSocketTimeout()}.</li>
+ * This option is the same as {@link MongoClientOptions#getSocketTimeout()}.
+ * Deprecated, use {@code timeoutMS} instead.</li>
 * <li>{@code maxIdleTimeMS=ms}: Maximum idle time of a pooled connection. A connection that exceeds this limit will be closed</li>
 * <li>{@code maxLifeTimeMS=ms}: Maximum life time of a pooled connection. A connection that exceeds this limit will be closed</li>
@@ -114,6 +115,8 @@
 *
 * <ul>
 * <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
 * <li>{@code maxConnecting=n}: The maximum number of connections a pool may be establishing concurrently.</li>
+ * <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
+ * become available. Deprecated, use {@code timeoutMS} instead.</li>
 * </ul>
 *
 * <p>Write concern configuration:</p>
 *
@@ -138,7 +141,7 @@
 * {@code "majority"}
 *   </ul>
 *  </li>
- * <li>{@code wtimeoutMS=ms}
+ * <li>{@code wtimeoutMS=ms}. Deprecated, use {@code timeoutMS} instead.
 *  <ul>
 *   <li>The driver adds { wtimeout : ms } to all write commands. Implies {@code safe=true}.</li>
 *   <li>Used in combination with {@code w}</li>
      • @@ -459,6 +462,10 @@ public MongoClientOptions getOptions() { if (srvServiceName != null) { builder.srvServiceName(srvServiceName); } + Long timeout = proxied.getTimeout(); + if (timeout != null) { + builder.timeout(timeout); + } return builder.build(); } diff --git a/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java new file mode 100644 index 00000000000..e47dd7bd32b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.DBCollectionCountOptions; +import com.mongodb.client.model.DBCollectionFindAndModifyOptions; +import com.mongodb.client.model.DBCollectionFindOptions; +import com.mongodb.internal.TimeoutSettings; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class TimeoutSettingsHelper { + + private TimeoutSettingsHelper() { + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final AggregationOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionCountOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindAndModifyOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final MapReduceCommand options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + +} diff --git a/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java b/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java index f63224cc5f0..cc515f1cb4f 100644 --- a/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java @@ -47,6 +47,7 @@ protected 
MongoDatabase getDatabase(final String databaseName) { @After public void cleanUp() { + super.cleanUp(); if (mongoClient != null) { mongoClient.close(); } diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy index 6118ce4cdaa..98cb8282c17 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy @@ -271,7 +271,8 @@ class DBCollectionSpecification extends Specification { collection.find().iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .retryReads(true)) @@ -280,7 +281,8 @@ class DBCollectionSpecification extends Specification { collection.find().iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .retryReads(true)) @@ -289,7 +291,8 @@ class DBCollectionSpecification extends Specification { collection.find(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)).iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .collation(collation) .retryReads(true)) @@ -311,7 +314,8 @@ class DBCollectionSpecification extends Specification { collection.findOne() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) .retryReads(true)) @@ -321,7 +325,8 @@ class DBCollectionSpecification extends Specification { collection.findOne() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) .retryReads(true)) @@ -331,7 +336,8 @@ class DBCollectionSpecification extends Specification { collection.findOne(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) .collation(collation) @@ -351,8 +357,8 @@ class DBCollectionSpecification extends Specification { collection.findAndRemove(query) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation(collection.getNamespace(), - WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument())) + expect executor.getWriteOperation(), 
isTheSameAs(new FindAndDeleteOperation(collection. + getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument())) } def 'findAndModify should create the correct FindAndUpdateOperation'() { @@ -383,7 +389,8 @@ class DBCollectionSpecification extends Specification { expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(), WriteConcern.W3, retryWrites, collection.getObjectCodec(), bsonUpdate) .filter(new BsonDocument()) - .collation(collation).arrayFilters(bsonDocumentWrapperArrayFilters)) + .collation(collation) + .arrayFilters(bsonDocumentWrapperArrayFilters)) where: dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]] @@ -407,8 +414,8 @@ class DBCollectionSpecification extends Specification { collection.findAndModify(query, replace) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection.getNamespace(), - WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace) + expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection. + getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace) .filter(new BsonDocument())) when: // With options @@ -477,8 +484,8 @@ class DBCollectionSpecification extends Specification { then: distinctFieldValues == [1, 2] - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) - .filter(new BsonDocument()).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', + new BsonValueCodec()).filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.DEFAULT when: // Inherits from DB @@ -486,7 +493,8 @@ class DBCollectionSpecification extends Specification { collection.distinct('field1') then: - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', + new BsonValueCodec()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -495,8 +503,8 @@ class DBCollectionSpecification extends Specification { collection.distinct('field1', new DBCollectionDistinctOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) - .collation(collation).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', + new BsonValueCodec()).collation(collation).retryReads(true)) executor.getReadConcern() == ReadConcern.LOCAL } @@ -515,8 +523,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - collection.getDefaultDBObjectCodec()) + new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument())) executor.getReadConcern() == ReadConcern.DEFAULT @@ -527,8 +535,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), 
isTheSameAs( - new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - collection.getDefaultDBObjectCodec()) + new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument())) executor.getReadConcern() == ReadConcern.LOCAL @@ -542,8 +550,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - collection.getDefaultDBObjectCodec()) + new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument()) .collation(collation)) @@ -562,8 +570,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - 'myColl', collection.getWriteConcern()) + new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) ) @@ -573,8 +581,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - 'myColl', collection.getWriteConcern()) + new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) ) @@ -587,8 +595,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - 'myColl', collection.getWriteConcern()) + new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) .collation(collation) @@ -611,8 +619,8 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true)) executor.getReadConcern() == ReadConcern.DEFAULT when: // Inherits from DB @@ -620,8 +628,8 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), + bsonPipeline, 
collection.getDefaultDBObjectCodec()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY when: @@ -629,8 +637,8 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true)) executor.getReadConcern() == ReadConcern.LOCAL } @@ -678,8 +686,8 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) when: // Inherits from DB @@ -687,8 +695,8 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) when: @@ -696,8 +704,8 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) } @@ -717,8 +725,8 @@ class DBCollectionSpecification extends Specification { collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, - WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), + true, WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest))) when: // Inherits from DB db.setWriteConcern(WriteConcern.W3) @@ -726,8 +734,8 @@ class DBCollectionSpecification extends Specification { then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, - WriteConcern.W3, retryWrites, asList(updateRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), + true, WriteConcern.W3, retryWrites, asList(updateRequest))) 
when: collection.setWriteConcern(WriteConcern.W1) @@ -736,8 +744,8 @@ class DBCollectionSpecification extends Specification { new DBCollectionUpdateOptions().collation(collation).arrayFilters(dbObjectArrayFilters)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, - WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters)))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), + true, WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters)))) where: dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]] @@ -759,16 +767,16 @@ class DBCollectionSpecification extends Specification { collection.remove(BasicDBObject.parse(query)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, - WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), + false, WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest))) when: // Inherits from DB db.setWriteConcern(WriteConcern.W3) collection.remove(BasicDBObject.parse(query)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, - WriteConcern.W3, retryWrites, asList(deleteRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), + false, WriteConcern.W3, retryWrites, asList(deleteRequest))) when: collection.setWriteConcern(WriteConcern.W1) @@ -776,8 +784,8 @@ class DBCollectionSpecification extends Specification { collection.remove(BasicDBObject.parse(query), new DBCollectionRemoveOptions().collation(collation)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, - WriteConcern.W1, retryWrites, asList(deleteRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), + false, WriteConcern.W1, retryWrites, asList(deleteRequest))) } def 'should create the correct MixedBulkWriteOperation'() { @@ -808,7 +816,8 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), + writeRequests, ordered, WriteConcern.ACKNOWLEDGED, false)) when: // Inherits from DB @@ -816,16 +825,16 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, - WriteConcern.W3, false)) + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), + writeRequests, ordered, WriteConcern.W3, false)) when: collection.setWriteConcern(WriteConcern.W1) bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, - WriteConcern.W1, false)) + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), + writeRequests, ordered, 
WriteConcern.W1, false)) where: ordered << [true, false, true] diff --git a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy index 227126b1160..85fb3ad867e 100644 --- a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy +++ b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy @@ -167,7 +167,8 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should return correct result for replace'() { given: def replacement = new UpdateRequest(new BsonDocument(), new BsonDocument('_id', new BsonInt32(1)), REPLACE) - def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) when: def result = execute(operation) @@ -182,11 +183,13 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should replace a single document'() { given: def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) - createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)).execute(getBinding()) + createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + .execute(getBinding()) def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) - def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) when: def result = execute(operation) @@ -205,7 +208,8 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) .upsert(true) - def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) when: execute(operation) @@ -216,9 +220,9 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should update nothing if no documents match'() { given: - def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, - asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), - new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, + false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) when: WriteConcernResult result = execute(operation) diff --git a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy index 84a755b5353..59dceb6478a 100644 --- a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy 
@@ -122,10 +122,11 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) - .filter(new BsonDocument()) - .projection(new BsonDocument()) - .retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) + .filter(new BsonDocument()) + .projection(new BsonDocument()) + .retryReads(true)) } @@ -140,11 +141,13 @@ class DBCursorSpecification extends Specification { cursor.one() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) - .limit(-1) - .filter(new BsonDocument()) - .projection(new BsonDocument()) - .retryReads(true)) + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + .limit(-1) + .filter(new BsonDocument()) + .projection(new BsonDocument()) + .retryReads(true) + ) } def 'DBCursor methods should be used to create the expected operation'() { @@ -167,7 +170,7 @@ class DBCursorSpecification extends Specification { .batchSize(1) .cursorType(cursorType) .limit(1) - .maxTime(1, TimeUnit.MILLISECONDS) + .maxTime(100, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .partial(true) .skip(1) @@ -177,13 +180,13 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .batchSize(1) .collation(collation) .cursorType(cursorType) .filter(bsonFilter) .limit(1) - .maxTime(1, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .partial(true) .skip(1) @@ -221,8 +224,8 @@ class DBCursorSpecification extends Specification { .collation(collation) .cursorType(cursorType) .limit(1) - .maxAwaitTime(1, TimeUnit.MILLISECONDS) - .maxTime(1, TimeUnit.MILLISECONDS) + .maxAwaitTime(1001, TimeUnit.MILLISECONDS) + .maxTime(101, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .partial(true) .projection(projection) @@ -249,8 +252,6 @@ class DBCursorSpecification extends Specification { .cursorType(cursorType) .filter(bsonFilter) .limit(1) - .maxAwaitTime(1, TimeUnit.MILLISECONDS) - .maxTime(1, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .partial(true) .projection(bsonProjection) diff --git a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy index fe61ba00a3d..5f0c81f28cc 100644 --- a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy @@ -36,6 +36,7 @@ import spock.lang.Specification import static Fixture.getMongoClient import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry import static org.junit.Assume.assumeTrue @@ -76,6 +77,7 @@ class DBSpecification extends Specification { def mongo = Stub(MongoClient) mongo.mongoClientOptions >> MongoClientOptions.builder().build() mongo.codecRegistry >> getDefaultCodecRegistry() + mongo.timeoutSettings >> TIMEOUT_SETTINGS def executor = new TestOperationExecutor([1L, 2L, 3L]) def db = new DB(mongo, 
'test', executor) db.setReadConcern(ReadConcern.MAJORITY) @@ -134,7 +136,8 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()).collation(collation)) + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()) + .collation(collation)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -144,6 +147,7 @@ class DBSpecification extends Specification { getCodecRegistry() >> MongoClient.defaultCodecRegistry } mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.timeoutSettings >> TIMEOUT_SETTINGS def executor = new TestOperationExecutor([1L, 2L, 3L]) def databaseName = 'test' @@ -180,6 +184,7 @@ class DBSpecification extends Specification { given: def mongo = Stub(MongoClient) mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.timeoutSettings >> TIMEOUT_SETTINGS def executor = new TestOperationExecutor([Stub(BatchCursor), Stub(BatchCursor)]) def databaseName = 'test' @@ -191,7 +196,8 @@ class DBSpecification extends Specification { def operation = executor.getReadOperation() as ListCollectionsOperation then: - expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry())) + expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, + new DBObjectCodec(getDefaultCodecRegistry())) .nameOnly(true)) when: @@ -199,7 +205,8 @@ class DBSpecification extends Specification { operation = executor.getReadOperation() as ListCollectionsOperation then: - expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry())) + expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, + new DBObjectCodec(getDefaultCodecRegistry())) .nameOnly(true)) } diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy index c36eacd2198..ae1d332674c 100644 --- a/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy @@ -51,6 +51,7 @@ class MongoClientOptionsSpecification extends Specification { options.getMinConnectionsPerHost() == 0 options.getConnectionsPerHost() == 100 options.getMaxConnecting() == 2 + options.getTimeout() == null options.getConnectTimeout() == 10000 options.getReadPreference() == ReadPreference.primary() options.getServerSelector() == null @@ -119,6 +120,7 @@ class MongoClientOptionsSpecification extends Specification { .readConcern(ReadConcern.MAJORITY) .minConnectionsPerHost(30) .connectionsPerHost(500) + .timeout(10_000) .connectTimeout(100) .socketTimeout(700) .serverSelector(serverSelector) @@ -161,6 +163,7 @@ class MongoClientOptionsSpecification extends Specification { options.getRetryWrites() !options.getRetryReads() options.getServerSelectionTimeout() == 150 + options.getTimeout() == 10_000 options.getMaxWaitTime() == 200 options.getMaxConnectionIdleTime() == 300 options.getMaxConnectionLifeTime() == 400 @@ -211,6 +214,7 @@ class MongoClientOptionsSpecification extends Specification { settings.readConcern == ReadConcern.MAJORITY settings.uuidRepresentation == UuidRepresentation.C_SHARP_LEGACY settings.serverApi == serverApi + settings.getTimeout(TimeUnit.MILLISECONDS) == 10_000 when: def 
optionsFromSettings = MongoClientOptions.builder(settings).build() @@ -224,6 +228,7 @@ class MongoClientOptionsSpecification extends Specification { optionsFromSettings.getRetryWrites() !optionsFromSettings.getRetryReads() optionsFromSettings.getServerSelectionTimeout() == 150 + optionsFromSettings.getServerSelectionTimeout() == 150 optionsFromSettings.getMaxWaitTime() == 200 optionsFromSettings.getMaxConnectionIdleTime() == 300 optionsFromSettings.getMaxConnectionLifeTime() == 400 @@ -317,6 +322,7 @@ class MongoClientOptionsSpecification extends Specification { .writeConcern(WriteConcern.JOURNALED) .minConnectionsPerHost(30) .connectionsPerHost(500) + .timeout(10_000) .connectTimeout(100) .socketTimeout(700) .serverSelectionTimeout(150) @@ -616,6 +622,7 @@ class MongoClientOptionsSpecification extends Specification { .uuidRepresentation(UuidRepresentation.STANDARD) .minConnectionsPerHost(30) .connectionsPerHost(500) + .timeout(10_000) .connectTimeout(100) .socketTimeout(700) .serverSelectionTimeout(150) diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy index c20fbabfb58..c007e504ae6 100644 --- a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy @@ -30,6 +30,7 @@ import org.bson.codecs.configuration.CodecRegistry import org.bson.json.JsonObject import spock.lang.Specification +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry import static com.mongodb.MongoCredential.createMongoX509Credential @@ -340,7 +341,7 @@ class MongoClientSpecification extends Specification { then: expect database, isTheSameAs(new MongoDatabaseImpl('name', client.getCodecRegistry(), secondary(), WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, STANDARD, null, - client.getOperationExecutor())) + TIMEOUT_SETTINGS.withMaxWaitTimeMS(120_000), client.getOperationExecutor())) } def 'should create registry reflecting UuidRepresentation'() { diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy index b187df8dab8..241ac958c8a 100644 --- a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy @@ -132,7 +132,8 @@ class MongoClientURISpecification extends Specification { + 'retryWrites=true&' + 'retryReads=true&' + 'uuidRepresentation=csharpLegacy&' - + 'appName=app1') + + 'appName=app1&' + + 'timeoutMS=10000') when: def options = uri.getOptions() @@ -146,6 +147,7 @@ class MongoClientURISpecification extends Specification { options.getMaxConnectionIdleTime() == 200 options.getMaxConnectionLifeTime() == 300 options.getMaxConnecting() == 1 + options.getTimeout() == 10_000 options.getSocketTimeout() == 5500 options.getConnectTimeout() == 2500 options.getRequiredReplicaSetName() == 'test' @@ -167,6 +169,7 @@ class MongoClientURISpecification extends Specification { then: options.getConnectionsPerHost() == 100 options.getMaxConnecting() == 2 + options.getTimeout() == null options.getMaxWaitTime() == 120000 options.getConnectTimeout() == 10000 options.getSocketTimeout() == 0 @@ -188,6 +191,7 @@ class MongoClientURISpecification extends Specification { 
.writeConcern(WriteConcern.JOURNALED) .minConnectionsPerHost(30) .connectionsPerHost(500) + .timeout(10_000) .connectTimeout(100) .socketTimeout(700) .serverSelectionTimeout(150) @@ -216,6 +220,7 @@ class MongoClientURISpecification extends Specification { options.getWriteConcern() == WriteConcern.JOURNALED options.getRetryWrites() options.getRetryReads() + options.getTimeout() == 10_000 options.getServerSelectionTimeout() == 150 options.getMaxWaitTime() == 200 options.getMaxConnectionIdleTime() == 300 diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java index a879094fa37..0642d0fc8f9 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java @@ -17,6 +17,9 @@ package com.mongodb.reactivestreams.client; import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -172,6 +175,27 @@ public interface AggregatePublisher extends Publisher { */ AggregatePublisher batchSize(int batchSize); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     * If the {@code timeout} is set then:
+     * <ul>
+     *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it's an error
+     *  to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
+     * </p>
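+     * <p>An illustrative usage sketch (the collection, pipeline and subscriber are hypothetical, and {@code timeout} is
+     * assumed to be configured on the client):</p>
+     * <pre>{@code
+     * collection.aggregate(singletonList(Aggregates.match(Filters.eq("status", "active"))))
+     *         .timeoutMode(TimeoutMode.CURSOR_LIFETIME) // the aggregate command and cursor iteration share one time budget
+     *         .subscribe(resultSubscriber);
+     * }</pre>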
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + AggregatePublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java index bf47ed7d9a2..2b695621dc3 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java @@ -16,6 +16,9 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -94,6 +97,20 @@ public interface DistinctPublisher extends Publisher { */ DistinctPublisher comment(@Nullable BsonValue comment); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + DistinctPublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java index 8a485facaf5..1128c87bd02 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java @@ -18,6 +18,9 @@ import com.mongodb.CursorType; import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.client.model.Projections; import com.mongodb.lang.Nullable; @@ -269,6 +272,27 @@ public interface FindPublisher extends Publisher { */ FindPublisher allowDiskUse(@Nullable Boolean allowDiskUse); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     * If the {@code timeout} is set then:
+     * <ul>
+     *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it's an error
+     *  to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
+     * </p>
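+     * <p>An illustrative sketch for a tailable cursor on a capped collection (the collection and subscriber are hypothetical,
+     * and {@code timeout} is assumed to be configured on the client):</p>
+     * <pre>{@code
+     * collection.find()
+     *         .cursorType(CursorType.TailableAwait)
+     *         .timeoutMode(TimeoutMode.ITERATION) // the timeout applies to each batch rather than to the cursor lifetime
+     *         .subscribe(tailSubscriber);
+     * }</pre>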
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + FindPublisher timeoutMode(TimeoutMode timeoutMode); + /** * Explain the execution plan for this operation with the server's default verbosity level * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java index dadef9dfab9..50808928172 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java @@ -16,6 +16,9 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import org.bson.BsonValue; import org.bson.conversions.Bson; @@ -84,6 +87,20 @@ public interface ListCollectionsPublisher extends Publisher { */ ListCollectionsPublisher comment(@Nullable BsonValue comment); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListCollectionsPublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java index 6f6f11e5296..0dea2b0e219 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java @@ -17,6 +17,9 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import org.bson.BsonValue; import org.bson.conversions.Bson; @@ -107,6 +110,20 @@ public interface ListDatabasesPublisher extends Publisher { */ ListDatabasesPublisher comment(@Nullable BsonValue comment); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListDatabasesPublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java index 9ee05851576..f2abb11a9bb 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java @@ -16,6 +16,9 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import org.bson.BsonValue; import org.reactivestreams.Publisher; @@ -73,6 +76,20 @@ public interface ListIndexesPublisher extends Publisher { */ ListIndexesPublisher comment(@Nullable BsonValue comment); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListIndexesPublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java index 2eacc6922bb..f7d0eb74f6c 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java @@ -17,7 +17,10 @@ package com.mongodb.reactivestreams.client; import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Evolving; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -98,6 +101,20 @@ public interface ListSearchIndexesPublisher extends Publisher */ ListSearchIndexesPublisher comment(@Nullable BsonValue comment); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListSearchIndexesPublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java index e57a8fce007..2add0f33691 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java @@ -17,6 +17,9 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -181,6 +184,27 @@ public interface MapReducePublisher extends Publisher { */ MapReducePublisher batchSize(int batchSize); + /** + * Sets the timeoutMode for the cursor. + * + *

+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     * If the {@code timeout} is set then:
+     * <ul>
+     *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it's an error
+     *  to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
+     * </p>
        + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + MapReducePublisher timeoutMode(TimeoutMode timeoutMode); + /** * Helper to return a publisher limited to the first result. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java index ed29939fbdc..061fd3c8bed 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java @@ -16,17 +16,12 @@ package com.mongodb.reactivestreams.client; -import com.mongodb.ClientSessionOptions; import com.mongodb.annotations.Immutable; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ClusterSettings; import com.mongodb.event.ClusterListener; -import org.bson.Document; -import org.bson.conversions.Bson; -import org.reactivestreams.Publisher; import java.io.Closeable; -import java.util.List; /** * A client-side representation of a MongoDB cluster. Instances can represent either a standalone MongoDB instance, a replica set, @@ -39,14 +34,7 @@ * @since 1.0 */ @Immutable -public interface MongoClient extends Closeable { - /** - * Gets the database with the given name. - * - * @param name the name of the database - * @return the database - */ - MongoDatabase getDatabase(String name); +public interface MongoClient extends MongoCluster, Closeable { /** * Close the client, which will close all underlying cached resources, including, for example, @@ -54,179 +42,6 @@ public interface MongoClient extends Closeable { */ void close(); - /** - * Get a list of the database names - * - * @mongodb.driver.manual reference/commands/listDatabases List Databases - * @return an iterable containing all the names of all the databases - */ - Publisher listDatabaseNames(); - - /** - * Get a list of the database names - * - * @param clientSession the client session with which to associate this operation - * @mongodb.driver.manual reference/commands/listDatabases List Databases - * @return an iterable containing all the names of all the databases - * - * @mongodb.server.release 3.6 - * @since 1.7 - */ - Publisher listDatabaseNames(ClientSession clientSession); - - /** - * Gets the list of databases - * - * @return the fluent list databases interface - */ - ListDatabasesPublisher listDatabases(); - - /** - * Gets the list of databases - * - * @param clazz the class to cast the database documents to - * @param the type of the class to use instead of {@code Document}. - * @return the fluent list databases interface - */ - ListDatabasesPublisher listDatabases(Class clazz); - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @return the fluent list databases interface - * @mongodb.server.release 3.6 - * @since 1.7 - */ - ListDatabasesPublisher listDatabases(ClientSession clientSession); - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @param clazz the class to cast the database documents to - * @param the type of the class to use instead of {@code Document}. - * @return the fluent list databases interface - * @mongodb.server.release 3.6 - * @since 1.7 - */ - ListDatabasesPublisher listDatabases(ClientSession clientSession, Class clazz); - - /** - * Creates a change stream for this client. 
- * - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 1.9 - * @mongodb.server.release 4.0 - */ - ChangeStreamPublisher watch(); - - /** - * Creates a change stream for this client. - * - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 1.9 - * @mongodb.server.release 4.0 - */ - ChangeStreamPublisher watch(Class resultClass); - - /** - * Creates a change stream for this client. - * - * @param pipeline the aggregation pipeline to apply to the change stream. - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 1.9 - * @mongodb.server.release 4.0 - */ - ChangeStreamPublisher watch(List pipeline); - - /** - * Creates a change stream for this client. - * - * @param pipeline the aggregation pipeline to apply to the change stream - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 1.9 - * @mongodb.server.release 4.0 - */ - ChangeStreamPublisher watch(List pipeline, Class resultClass); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @return the change stream iterable - * @since 1.9 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamPublisher watch(ClientSession clientSession); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @since 1.9 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamPublisher watch(ClientSession clientSession, Class resultClass); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream. - * @return the change stream iterable - * @since 1.9 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamPublisher watch(ClientSession clientSession, List pipeline); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @since 1.9 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamPublisher watch(ClientSession clientSession, List pipeline, Class resultClass); - - /** - * Creates a client session. - * - * @return a publisher for the client session. - * @mongodb.server.release 3.6 - * @since 1.9 - */ - Publisher startSession(); - - /** - * Creates a client session. - * - * @param options the options for the client session - * @return a publisher for the client session. 
- * @mongodb.server.release 3.6 - * @since 1.7 - */ - Publisher startSession(ClientSessionOptions options); - /** * Gets the current cluster description. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java index 28bcc068805..a2f5fb9d125 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java @@ -21,6 +21,7 @@ import com.mongodb.MongoClientSettings; import com.mongodb.MongoDriverInformation; import com.mongodb.connection.TransportSettings; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.AsynchronousSocketChannelStreamFactoryFactory; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.DefaultClusterFactory; @@ -148,11 +149,11 @@ private static Cluster createCluster(final MongoClientSettings settings, final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory) { notNull("settings", settings); return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(), - settings.getConnectionPoolSettings(), - InternalConnectionPoolSettings.builder().prestartAsyncWorkManager(true).build(), - streamFactory, heartbeatStreamFactory, settings.getCredential(), settings.getLoggerSettings(), - getCommandListener(settings.getCommandListeners()), settings.getApplicationName(), mongoDriverInformation, - settings.getCompressorList(), settings.getServerApi(), settings.getDnsClient()); + settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().prestartAsyncWorkManager(true).build(), + TimeoutSettings.create(settings), streamFactory, TimeoutSettings.createHeartbeatSettings(settings), heartbeatStreamFactory, + settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()), + settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(), + settings.getDnsClient()); } private static MongoDriverInformation wrapMongoDriverInformation(@Nullable final MongoDriverInformation mongoDriverInformation) { diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java new file mode 100644 index 00000000000..ef7c0ddb79d --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java @@ -0,0 +1,356 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The client-side representation of a MongoDB cluster operations. + * + *

+ * <p>
+ * The originating {@link MongoClient} is responsible for the closing of resources.
+ * If the originating {@link MongoClient} is closed, then any cluster operations will fail.
+ * </p>
        + * + * @see MongoClient + * @since 5.2 + */ +@Immutable +public interface MongoCluster { + + /** + * Get the codec registry for the MongoCluster. + * + * @return the {@link CodecRegistry} + * @since 5.2 + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoCluster. + * + * @return the {@link ReadPreference} + * @since 5.2 + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoCluster. + * + * @return the {@link WriteConcern} + * @since 5.2 + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCluster. + * + * @return the {@link ReadConcern} + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
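+     * <p>A minimal sketch of interpreting the returned value (variable names are illustrative):</p>
+     * <pre>{@code
+     * Long timeoutMS = mongoCluster.getTimeout(TimeUnit.MILLISECONDS);
+     * if (timeoutMS == null) {
+     *     // no client-wide timeout: the legacy per-operation options listed above apply
+     * } else if (timeoutMS == 0) {
+     *     // infinite timeout
+     * } else {
+     *     // each operation must complete within timeoutMS milliseconds
+     * }
+     * }</pre>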
        + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoCluster instance with a different codec registry. + * + *

+     * <p>The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+     * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+     * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+     * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.</p>
        + * + * @param codecRegistry the new {@link CodecRegistry} for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + * @since 5.2 + */ + MongoCluster withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the database + * @return a new MongoCluster instance with the different readPreference + * @since 5.2 + */ + MongoCluster withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the database + * @return a new MongoCluster instance with the different writeConcern + * @since 5.2 + */ + MongoCluster withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the database + * @return a new MongoCluster instance with the different ReadConcern + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + MongoCluster withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * + *
          + *
+     * <ul>
+     * <li>{@code 0} means infinite timeout.</li>
+     * <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
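+     * <p>A minimal usage sketch (assumes an existing {@code MongoClient}, which extends {@code MongoCluster}):</p>
+     * <pre>{@code
+     * MongoCluster clusterWithTimeout = mongoClient.withTimeout(5, TimeUnit.SECONDS);
+     * MongoDatabase database = clusterWithTimeout.getDatabase("test"); // operations inherit the 5 second budget
+     * }</pre>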
        + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCluster instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCluster withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets a {@link MongoDatabase} instance for the given database name. + * + * @param databaseName the name of the database to retrieve + * @return a {@code MongoDatabase} representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see MongoNamespace#checkDatabaseNameValidity(String) + */ + MongoDatabase getDatabase(String databaseName); + + /** + * Creates a client session with default options. + * + *

+     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
        + * + * @return the client session + * @mongodb.server.release 3.6 + */ + Publisher startSession(); + + /** + * Creates a client session. + * + *

+     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
        + * + * @param options the options for the client session + * @return the client session + * @mongodb.server.release 3.6 + */ + Publisher startSession(ClientSessionOptions options); + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + */ + Publisher listDatabaseNames(); + + /** + * Get a list of the database names + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + Publisher listDatabaseNames(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @return the list databases iterable interface + */ + ListDatabasesPublisher listDatabases(); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesPublisher listDatabases(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + */ + ListDatabasesPublisher listDatabases(Class resultClass); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesPublisher listDatabases(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(); + + /** + * Creates a change stream for this client. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(List pipeline); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. 
+ * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline, Class resultClass); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java index 635547ef7f7..4e17208b342 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java @@ -20,6 +20,8 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.ThreadSafe; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.client.model.BulkWriteOptions; @@ -45,12 +47,14 @@ import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.reactivestreams.Publisher; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoCollection interface. @@ -107,6 +111,37 @@ public interface MongoCollection { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
        + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. * @@ -156,6 +191,23 @@ public interface MongoCollection { */ MongoCollection withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + *
          + *
+     * <ul>
+     * <li>{@code 0} means infinite timeout.</li>
+     * <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
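+     * <p>A minimal sketch of a per-collection override of the client-wide timeout (names are illustrative):</p>
+     * <pre>{@code
+     * MongoCollection<Document> boundedCollection = collection.withTimeout(200, TimeUnit.MILLISECONDS);
+     * boundedCollection.find().first().subscribe(firstDocumentSubscriber);
+     * }</pre>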
        + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCollection instance with the set time limit for the full execution of an operation + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCollection withTimeout(long timeout, TimeUnit timeUnit); + /** * Gets an estimate of the count of documents in a collection using collection metadata. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java index e17f2d05259..b479ece08c5 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java @@ -19,15 +19,19 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.ThreadSafe; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.reactivestreams.Publisher; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoDatabase interface. @@ -74,6 +78,37 @@ public interface MongoDatabase { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
        + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -114,6 +149,23 @@ public interface MongoDatabase { */ MongoDatabase withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + *
          + *
+     * <ul>
+     * <li>{@code 0} means infinite timeout.</li>
+     * <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
        + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoDatabase instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoDatabase withTimeout(long timeout, TimeUnit timeUnit); + /** * Gets a collection. * @@ -135,6 +187,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
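+     * <p>A minimal sketch of the pattern this note suggests (command and names are illustrative): when {@code timeoutMS} is
+     * configured, rely on it instead of embedding {@code maxTimeMS} in the command document:</p>
+     * <pre>{@code
+     * // with timeoutMS configured, avoid: new Document("count", "coll").append("maxTimeMS", 100)
+     * database.runCommand(new Document("count", "coll")).subscribe(resultSubscriber);
+     * }</pre>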
        + * * @param command the command to be run * @return a publisher containing the command result */ @@ -143,6 +198,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param command the command to be run * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command * @return a publisher containing the command result @@ -152,6 +210,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param command the command to be run * @param clazz the default class to cast any documents returned from the database into. * @param the type of the class to use instead of {@code Document}. @@ -162,6 +223,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param command the command to be run * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command * @param clazz the default class to cast any documents returned from the database into. @@ -173,6 +237,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @return a publisher containing the command result @@ -184,6 +251,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command @@ -196,6 +266,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param clazz the default class to cast any documents returned from the database into. @@ -209,6 +282,9 @@ public interface MongoDatabase { /** * Executes command in the context of the current database. * + *

+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
        + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java index e0df38798d4..78a3f5357fc 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java @@ -19,9 +19,12 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.ThreadSafe; import com.mongodb.client.gridfs.model.GridFSDownloadOptions; import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; import org.bson.BsonValue; import org.bson.conversions.Bson; @@ -29,6 +32,7 @@ import org.reactivestreams.Publisher; import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; /** * Represents a GridFS Bucket @@ -75,6 +79,37 @@ public interface GridFSBucket { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: cursor.maxTimeMS.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
        + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new GridFSBucket instance with a new chunk size in bytes. * @@ -109,6 +144,23 @@ public interface GridFSBucket { */ GridFSBucket withReadConcern(ReadConcern readConcern); + /** + * Create a new GridFSBucket instance with the set time limit for the full execution of an operation. + * + *
          + *
+     * <ul>
+     * <li>{@code 0} means infinite timeout.</li>
+     * <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
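+     * <p>A minimal usage sketch (assumes a bucket obtained from {@code GridFSBuckets.create(database)}):</p>
+     * <pre>{@code
+     * GridFSBucket boundedBucket = gridFSBucket.withTimeout(30, TimeUnit.SECONDS);
+     * boundedBucket.uploadFromPublisher("myFile", contentPublisher).subscribe(objectIdSubscriber);
+     * }</pre>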
        + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new GridFSBucket instance with the set time limit for the full execution of an operation + * @since 4.x + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + GridFSBucket withTimeout(long timeout, TimeUnit timeUnit); + /** * Uploads the contents of the given {@code Publisher} to a GridFS bucket. *

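Taken together, the interface changes above expose the client-level timeout throughout the reactive API. The sketch below shows the intended end-to-end usage; it assumes the MongoClientSettings.Builder timeout option added elsewhere in this series, and the database, collection and subscriber wiring are hypothetical.

    import com.mongodb.MongoClientSettings;
    import com.mongodb.client.cursor.TimeoutMode;
    import com.mongodb.reactivestreams.client.MongoClient;
    import com.mongodb.reactivestreams.client.MongoClients;
    import com.mongodb.reactivestreams.client.MongoCollection;
    import org.bson.Document;
    import org.reactivestreams.Publisher;

    import java.util.concurrent.TimeUnit;

    public final class TimeoutSketch {
        public static void main(final String[] args) {
            MongoClientSettings settings = MongoClientSettings.builder()
                    .timeout(10, TimeUnit.SECONDS) // client-wide timeoutMS; builder option assumed from this patch series
                    .build();
            try (MongoClient client = MongoClients.create(settings)) {
                MongoCollection<Document> collection = client.getDatabase("test")
                        .getCollection("coll")
                        .withTimeout(2, TimeUnit.SECONDS); // tighter per-collection budget
                Publisher<Document> first = collection.find()
                        .timeoutMode(TimeoutMode.CURSOR_LIFETIME) // command execution and cursor iteration share the budget
                        .first();
                // subscribe to 'first' with any Reactive Streams Subscriber to execute the query
            }
        }
    }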
        diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java index f9160b030f0..d96c0e933da 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java @@ -18,11 +18,14 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.AsyncExplainableReadOperation; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.AggregatePublisher; @@ -36,6 +39,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -74,6 +78,12 @@ public AggregatePublisher batchSize(final int batchSize) { return this; } + @Override + public AggregatePublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public AggregatePublisher maxTime(final long maxTime, final TimeUnit timeUnit) { notNull("timeUnit", timeUnit); @@ -83,8 +93,7 @@ public AggregatePublisher maxTime(final long maxTime, final TimeUnit timeUnit @Override public AggregatePublisher maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); return this; } @@ -136,7 +145,9 @@ public Publisher toCollection() { if (lastPipelineStage == null || !lastPipelineStage.containsKey("$out") && !lastPipelineStage.containsKey("$merge")) { throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge"); } - return getMongoOperationPublisher().createReadOperationMono(this::getAggregateToCollectionOperation, getClientSession()); + return getMongoOperationPublisher().createReadOperationMono( + (asyncOperations) -> asyncOperations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS), + this::getAggregateToCollectionOperation, getClientSession()); } @Override @@ -161,10 +172,10 @@ public Publisher explain(final Class explainResultClass, final Explain private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getMongoOperationPublisher().createReadOperationMono(() -> - asAggregateOperation(1).asAsyncExplainableOperation(verbosity, - getCodecRegistry().get(explainResultClass)), - getClientSession()); + return getMongoOperationPublisher().createReadOperationMono( + AsyncOperations::getTimeoutSettings, + () -> asAggregateOperation(1).asAsyncExplainableOperation(verbosity, + getCodecRegistry().get(explainResultClass)), getClientSession()); } @Override @@ -185,15 +196,20 @@ AsyncReadOperation> asAsyncReadOperation(final int initialBa } } + @Override + Function, 
TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS)); + } + private AsyncExplainableReadOperation> asAggregateOperation(final int initialBatchSize) { return getOperations() - .aggregate(pipeline, getDocumentClass(), maxTimeMS, maxAwaitTimeMS, + .aggregate(pipeline, getDocumentClass(), getTimeoutMode(), initialBatchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel); } private AsyncReadOperation getAggregateToCollectionOperation() { - return getOperations().aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment, - variables, aggregationLevel); + return getOperations().aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, bypassDocumentValidation, + collation, hint, hintString, comment, variables, aggregationLevel); } @Nullable diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java index 3a19f14709f..cf5a9d9f25b 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java @@ -18,6 +18,8 @@ import com.mongodb.MongoNamespace; import com.mongodb.ReadPreference; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.operation.AsyncOperations; @@ -29,9 +31,12 @@ import org.reactivestreams.Subscriber; import reactor.core.publisher.Mono; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PROTECTED) @@ -39,6 +44,7 @@ public abstract class BatchCursorPublisher implements Publisher { private final ClientSession clientSession; private final MongoOperationPublisher mongoOperationPublisher; private Integer batchSize; + private TimeoutMode timeoutMode; BatchCursorPublisher(@Nullable final ClientSession clientSession, final MongoOperationPublisher mongoOperationPublisher) { this(clientSession, mongoOperationPublisher, null); @@ -52,6 +58,7 @@ public abstract class BatchCursorPublisher implements Publisher { } abstract AsyncReadOperation> asAsyncReadOperation(int initialBatchSize); + abstract Function, TimeoutSettings> getTimeoutSettings(); AsyncReadOperation> asAsyncFirstReadOperation() { return asAsyncReadOperation(1); @@ -101,6 +108,19 @@ public Publisher batchSize(final int batchSize) { return this; } + public Publisher timeoutMode(final TimeoutMode timeoutMode) { + if (mongoOperationPublisher.getTimeoutSettings().getTimeoutMS() == null) { + throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set."); + } + this.timeoutMode = timeoutMode; + return this; + } + + @Nullable + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + public Publisher first() { return batchCursor(this::asAsyncFirstReadOperation) .flatMap(batchCursor -> Mono.create(sink -> { @@ -130,7 +150,18 @@ public Mono> 
batchCursor(final int initialBatchSize) { } Mono> batchCursor(final Supplier>> supplier) { - return mongoOperationPublisher.createReadOperationMono(supplier, clientSession).map(BatchCursor::new); + return mongoOperationPublisher.createReadOperationMono(getTimeoutSettings(), supplier, clientSession).map(BatchCursor::new); } + + protected long validateMaxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + Long timeoutMS = mongoOperationPublisher.getTimeoutSettings().getTimeoutMS(); + long maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + + isTrueArgument("maxAwaitTimeMS must be less than timeoutMS", timeoutMS == null || timeoutMS == 0 + || timeoutMS > maxAwaitTimeMS); + + return maxAwaitTimeMS; + } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java index 06c1857287a..8fc1a093aab 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java @@ -20,8 +20,10 @@ import com.mongodb.client.model.changestream.ChangeStreamDocument; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ChangeStreamPublisher; @@ -36,9 +38,9 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; final class ChangeStreamPublisherImpl extends BatchCursorPublisher> @@ -121,8 +123,7 @@ public ChangeStreamPublisher comment(@Nullable final BsonValue comment) { @Override public ChangeStreamPublisher maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxAwaitTimeMS = MILLISECONDS.convert(maxAwaitTime, timeUnit); + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); return this; } @@ -140,6 +141,11 @@ public Publisher withDocumentClass(final Class AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { return createChangeStreamOperation(getMongoOperationPublisher().getCodecRegistry().get(clazz), initialBatchSize); } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(0, maxAwaitTimeMS)); + } }; } @@ -166,8 +172,14 @@ AsyncReadOperation>> asAsyncReadOperati return createChangeStreamOperation(codec, initialBatchSize); } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + private AsyncReadOperation> createChangeStreamOperation(final Codec codec, final int initialBatchSize) { return getOperations().changeStream(fullDocument, fullDocumentBeforeChange, pipeline, codec, changeStreamLevel, initialBatchSize, - collation, comment, maxAwaitTimeMS, resumeToken, 
startAtOperationTime, startAfter, showExpandedEvents); + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java index 46fa37bf8d2..2e87b3bccf8 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java @@ -18,8 +18,6 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; import com.mongodb.connection.ClusterType; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.async.SingleResultCallback; @@ -32,7 +30,6 @@ import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.session.ClientSessionContext; -import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; import org.bson.BsonTimestamp; @@ -49,13 +46,13 @@ public class ClientSessionBinding extends AbstractReferenceCounted implements As private final AsyncClusterAwareReadWriteBinding wrapped; private final ClientSession session; private final boolean ownsSession; - private final ClientSessionContext sessionContext; + private final OperationContext operationContext; public ClientSessionBinding(final ClientSession session, final boolean ownsSession, final AsyncClusterAwareReadWriteBinding wrapped) { this.wrapped = notNull("wrapped", wrapped).retain(); this.ownsSession = ownsSession; this.session = notNull("session", session); - this.sessionContext = new AsyncClientSessionContext(session); + this.operationContext = wrapped.getOperationContext().withSessionContext(new AsyncClientSessionContext(session)); } @Override @@ -63,25 +60,9 @@ public ReadPreference getReadPreference() { return wrapped.getReadPreference(); } - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); - } - @Override public OperationContext getOperationContext() { - return wrapped.getOperationContext(); + return operationContext; } @Override @@ -159,25 +140,9 @@ public ServerDescription getServerDescription() { return wrapped.getServerDescription(); } - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); - } - @Override public OperationContext getOperationContext() { - return wrapped.getOperationContext(); + return operationContext; } @Override @@ -277,7 +242,7 @@ public ReadConcern getReadConcern() { } else if (isSnapshot()) { return ReadConcern.SNAPSHOT; } else { - return wrapped.getSessionContext().getReadConcern(); + return wrapped.getOperationContext().getSessionContext().getReadConcern(); } } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java 
b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java index 9594a9ad533..62314c7e141 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java @@ -23,14 +23,16 @@ import com.mongodb.ReadConcern; import com.mongodb.TransactionOptions; import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.operation.AbortTransactionOperation; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.internal.operation.CommitTransactionOperation; +import com.mongodb.internal.operation.WriteConcernHelper; import com.mongodb.internal.session.BaseClientSessionImpl; import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; -import com.mongodb.reactivestreams.client.MongoClient; import org.reactivestreams.Publisher; import reactor.core.publisher.Mono; import reactor.core.publisher.MonoSink; @@ -41,20 +43,22 @@ import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; final class ClientSessionPublisherImpl extends BaseClientSessionImpl implements ClientSession { + private final MongoClientImpl mongoClient; private final OperationExecutor executor; private TransactionState transactionState = TransactionState.NONE; private boolean messageSentInCurrentTransaction; private boolean commitInProgress; private TransactionOptions transactionOptions; - ClientSessionPublisherImpl(final ServerSessionPool serverSessionPool, final MongoClient mongoClient, + + ClientSessionPublisherImpl(final ServerSessionPool serverSessionPool, final MongoClientImpl mongoClient, final ClientSessionOptions options, final OperationExecutor executor) { super(serverSessionPool, mongoClient, options); this.executor = executor; + this.mongoClient = mongoClient; } @Override @@ -100,6 +104,7 @@ public void startTransaction() { @Override public void startTransaction(final TransactionOptions transactionOptions) { notNull("transactionOptions", transactionOptions); + Boolean snapshot = getOptions().isSnapshot(); if (snapshot != null && snapshot) { throw new IllegalArgumentException("Transactions are not supported in snapshot sessions"); @@ -114,7 +119,9 @@ public void startTransaction(final TransactionOptions transactionOptions) { } getServerSession().advanceTransactionNumber(); this.transactionOptions = TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()); - WriteConcern writeConcern = this.transactionOptions.getWriteConcern(); + + TimeoutContext timeoutContext = createTimeoutContext(); + WriteConcern writeConcern = getWriteConcern(timeoutContext); if (writeConcern == null) { throw new MongoInternalException("Invariant violated. 
Transaction options write concern can not be null"); } @@ -122,6 +129,16 @@ public void startTransaction(final TransactionOptions transactionOptions) { throw new MongoClientException("Transactions do not support unacknowledged write concern"); } clearTransactionContext(); + setTimeoutContext(timeoutContext); + } + + @Nullable + private WriteConcern getWriteConcern(@Nullable final TimeoutContext timeoutContext) { + WriteConcern writeConcern = transactionOptions.getWriteConcern(); + if (hasTimeoutMS(timeoutContext) && hasWTimeoutMS(writeConcern)) { + return WriteConcernHelper.cloneWithoutTimeout(writeConcern); + } + return writeConcern; } @Override @@ -142,12 +159,13 @@ public Publisher commitTransaction() { } boolean alreadyCommitted = commitInProgress || transactionState == TransactionState.COMMITTED; commitInProgress = true; - - return executor.execute( - new CommitTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()), alreadyCommitted) - .recoveryToken(getRecoveryToken()) - .maxCommitTime(transactionOptions.getMaxCommitTime(MILLISECONDS), MILLISECONDS), - readConcern, this) + resetTimeout(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + return executor + .execute( + new CommitTransactionOperation(writeConcern, alreadyCommitted) + .recoveryToken(getRecoveryToken()), readConcern, this) .doOnTerminate(() -> { commitInProgress = false; transactionState = TransactionState.COMMITTED; @@ -175,10 +193,13 @@ public Publisher abortTransaction() { if (readConcern == null) { throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); } - return executor.execute( - new AbortTransactionOperation(assertNotNull(transactionOptions.getWriteConcern())) - .recoveryToken(getRecoveryToken()), - readConcern, this) + + resetTimeout(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + return executor + .execute(new AbortTransactionOperation(writeConcern) + .recoveryToken(getRecoveryToken()), readConcern, this) .onErrorResume(Throwable.class, (e) -> Mono.empty()) .doOnTerminate(() -> { clearTransactionContext(); @@ -196,7 +217,7 @@ private void clearTransactionContextOnError(final MongoException e) { @Override public void close() { if (transactionState == TransactionState.IN) { - Mono.from(abortTransaction()).doOnSuccess(it -> close()).subscribe(); + Mono.from(abortTransaction()).doFinally(it -> super.close()).subscribe(); } else { super.close(); } @@ -206,9 +227,10 @@ private void cleanupTransaction(final TransactionState nextState) { messageSentInCurrentTransaction = false; transactionOptions = null; transactionState = nextState; + setTimeoutContext(null); } - private enum TransactionState { - NONE, IN, COMMITTED, ABORTED + private TimeoutContext createTimeoutContext() { + return new TimeoutContext(getTimeoutSettings(transactionOptions, executor.getTimeoutSettings())); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java index 16de864336f..84c0df234c5 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java @@ -16,8 +16,11 @@ package 
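Seen from the application, the getWriteConcern(timeoutContext) helper above means that once timeoutMS is configured, a wTimeout carried by the transaction's write concern is cloned away and the operation timeout takes over for commit and abort. A rough sketch under that assumption; the session is presumed to come from a client with a timeout configured, and the values are arbitrary:

import com.mongodb.TransactionOptions;
import com.mongodb.WriteConcern;
import com.mongodb.reactivestreams.client.ClientSession;

import java.util.concurrent.TimeUnit;

final class TransactionTimeoutSketch {
    static void startTimedTransaction(final ClientSession session) {
        TransactionOptions options = TransactionOptions.builder()
                // The wTimeout here is expected to be dropped when timeoutMS is in effect.
                .writeConcern(WriteConcern.MAJORITY.withWTimeout(100, TimeUnit.MILLISECONDS))
                .build();
        session.startTransaction(options);
    }
}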
com.mongodb.reactivestreams.client.internal; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -27,6 +30,7 @@ import org.bson.conversions.Bson; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -84,9 +88,20 @@ public DistinctPublisher comment(@Nullable final BsonValue comment) { return this; } + @Override + public DistinctPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { // initialBatchSize is ignored for distinct operations. - return getOperations().distinct(fieldName, filter, getDocumentClass(), maxTimeMS, collation, comment); + return getOperations().distinct(fieldName, filter, getDocumentClass(), collation, comment); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java index 401c02dc583..ff9fb3a8036 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java @@ -18,10 +18,13 @@ import com.mongodb.CursorType; import com.mongodb.ExplainVerbosity; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.AsyncExplainableReadOperation; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -32,6 +35,7 @@ import org.reactivestreams.Publisher; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -74,7 +78,7 @@ public FindPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { @Override public FindPublisher maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); + validateMaxAwaitTime(maxAwaitTime, timeUnit); findOptions.maxAwaitTime(maxAwaitTime, timeUnit); return this; } @@ -182,6 +186,13 @@ public FindPublisher allowDiskUse(@Nullable final Boolean allowDiskUse) { return this; } + @Override + public FindPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + findOptions.timeoutMode(timeoutMode); + return this; + } + @Override public Publisher explain() { return publishExplain(Document.class, null); @@ -204,10 +215,10 @@ public Publisher explain(final Class explainResultClass, final Explain private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", 
explainResultClass); - return getMongoOperationPublisher().createReadOperationMono(() -> - asAsyncReadOperation(0).asAsyncExplainableOperation(verbosity, - getCodecRegistry().get(explainResultClass)), - getClientSession()); + return getMongoOperationPublisher().createReadOperationMono( + getTimeoutSettings(), + () -> asAsyncReadOperation(0) + .asAsyncExplainableOperation(verbosity, getCodecRegistry().get(explainResultClass)), getClientSession()); } @Override @@ -215,6 +226,11 @@ AsyncExplainableReadOperation> asAsyncReadOperation(final in return getOperations().find(filter, getDocumentClass(), findOptions.withBatchSize(initialBatchSize)); } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(findOptions)); + } + @Override AsyncReadOperation> asAsyncFirstReadOperation() { return getOperations().findFirst(filter, getDocumentClass(), findOptions); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java index 056aaa615d4..057a8067ad3 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java @@ -17,7 +17,10 @@ package com.mongodb.reactivestreams.client.internal; import com.mongodb.ReadConcern; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -28,6 +31,7 @@ import org.bson.conversions.Bson; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -76,6 +80,14 @@ public ListCollectionsPublisher comment(@Nullable final BsonValue comment) { return this; } + + @SuppressWarnings("ReactiveStreamsUnusedPublisher") + @Override + public ListCollectionsPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + /** * @see ListCollectionNamesPublisher#authorizedCollections(boolean) */ @@ -83,8 +95,14 @@ void authorizedCollections(final boolean authorizedCollections) { this.authorizedCollections = authorizedCollections; } + AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { return getOperations().listCollections(getNamespace().getDatabaseName(), getDocumentClass(), filter, collectionNamesOnly, - authorizedCollections, initialBatchSize, maxTimeMS, comment); + authorizedCollections, initialBatchSize, comment, getTimeoutMode()); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java index 0157401cf66..b897a8bf9df 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java +++ 
b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java @@ -16,7 +16,10 @@ package com.mongodb.reactivestreams.client.internal; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -26,6 +29,7 @@ import org.bson.conversions.Bson; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -82,8 +86,19 @@ public ListDatabasesPublisher comment(@Nullable final BsonValue comment) { return this; } + @Override + public ListDatabasesPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); + } + AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { -// initialBatchSize is ignored for distinct operations. - return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, maxTimeMS, authorizedDatabasesOnly, comment); + // initialBatchSize is ignored for distinct operations. + return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, authorizedDatabasesOnly, comment); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java index 22a1f536dc0..79e5ce2a14a 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java @@ -16,7 +16,10 @@ package com.mongodb.reactivestreams.client.internal; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -25,6 +28,7 @@ import org.bson.BsonValue; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -62,7 +66,19 @@ public ListIndexesPublisher comment(@Nullable final BsonValue comment) { return this; } + @SuppressWarnings("ReactiveStreamsUnusedPublisher") + @Override + public ListIndexesPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { - return getOperations().listIndexes(getDocumentClass(), initialBatchSize, maxTimeMS, comment); + return getOperations().listIndexes(getDocumentClass(), initialBatchSize, comment, getTimeoutMode()); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); } } diff --git 
a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java index 474ed7a6b09..035d7d3bbec 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java @@ -17,9 +17,12 @@ package com.mongodb.reactivestreams.client.internal; import com.mongodb.ExplainVerbosity; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.operation.AsyncExplainableReadOperation; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher; @@ -29,6 +32,7 @@ import org.reactivestreams.Publisher; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -85,6 +89,12 @@ public ListSearchIndexesPublisher comment(@Nullable final String comment) { return this; } + @Override + public ListSearchIndexesPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public ListSearchIndexesPublisher comment(@Nullable final BsonValue comment) { this.comment = comment; @@ -117,8 +127,9 @@ public Publisher explain(final Class explainResultClass, final Explain } private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { - return getMongoOperationPublisher().createReadOperationMono(() -> - asAggregateOperation(1).asAsyncExplainableOperation(verbosity, + return getMongoOperationPublisher().createReadOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)), + () -> asAggregateOperation(1).asAsyncExplainableOperation(verbosity, getCodecRegistry().get(explainResultClass)), getClientSession()); } @@ -127,9 +138,12 @@ AsyncReadOperation> asAsyncReadOperation(final int initialBa return asAggregateOperation(initialBatchSize); } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); + } + private AsyncExplainableReadOperation> asAggregateOperation(final int initialBatchSize) { - return getOperations().listSearchIndexes(getDocumentClass(), maxTimeMS, indexName, initialBatchSize, collation, - comment, - allowDiskUse); + return getOperations().listSearchIndexes(getDocumentClass(), indexName, initialBatchSize, collation, comment, allowDiskUse); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java index 37e30e04e07..f8371c8afb6 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java @@ -18,12 +18,15 @@ import com.mongodb.MongoNamespace; import com.mongodb.ReadPreference; +import com.mongodb.client.cursor.TimeoutMode; import 
com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.internal.operation.MapReduceAsyncBatchCursor; @@ -35,6 +38,7 @@ import org.reactivestreams.Publisher; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.notNull; @@ -151,12 +155,21 @@ public com.mongodb.reactivestreams.client.MapReducePublisher bypassDocumentVa return this; } + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public Publisher toCollection() { if (inline) { throw new IllegalStateException("The options must specify a non-inline result"); } - return getMongoOperationPublisher().createWriteOperationMono(this::createMapReduceToCollectionOperation, getClientSession()); + return getMongoOperationPublisher().createWriteOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)), + this::createMapReduceToCollectionOperation, + getClientSession()); } @Override @@ -174,6 +187,11 @@ ReadPreference getReadPreference() { } } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); + } + @Override AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { if (inline) { @@ -187,15 +205,13 @@ AsyncReadOperation> asAsyncReadOperation(final int initialBa private WrappedMapReduceReadOperation createMapReduceInlineOperation() { return new WrappedMapReduceReadOperation<>(getOperations().mapReduce(mapFunction, reduceFunction, finalizeFunction, - getDocumentClass(), filter, limit, maxTimeMS, jsMode, scope, - sort, verbose, collation)); + getDocumentClass(), filter, limit, jsMode, scope, sort, verbose, collation)); } private WrappedMapReduceWriteOperation createMapReduceToCollectionOperation() { - return new WrappedMapReduceWriteOperation(getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction, - reduceFunction, finalizeFunction, filter, limit, - maxTimeMS, jsMode, scope, sort, verbose, action, - bypassDocumentValidation, collation)); + return new WrappedMapReduceWriteOperation( + getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, + limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation)); } private AsyncReadOperation> createFindOperation(final int initialBatchSize) { diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java index 95526e86ea5..27a0c9195c3 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java @@ -18,10 +18,14 @@ import 
com.mongodb.AutoEncryptionSettings; import com.mongodb.ClientSessionOptions; +import com.mongodb.ContextProvider; import com.mongodb.MongoClientSettings; import com.mongodb.MongoDriverInformation; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; import com.mongodb.connection.ClusterDescription; -import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; @@ -31,18 +35,19 @@ import com.mongodb.reactivestreams.client.ClientSession; import com.mongodb.reactivestreams.client.ListDatabasesPublisher; import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCluster; import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.ReactiveContextProvider; import com.mongodb.reactivestreams.client.internal.crypt.Crypt; import com.mongodb.reactivestreams.client.internal.crypt.Crypts; import org.bson.BsonDocument; import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static com.mongodb.assertions.Assertions.notNull; @@ -59,14 +64,10 @@ public final class MongoClientImpl implements MongoClient { private static final Logger LOGGER = Loggers.getLogger("client"); - private final Cluster cluster; private final MongoClientSettings settings; - private final OperationExecutor executor; private final AutoCloseable externalResourceCloser; - private final ServerSessionPool serverSessionPool; - private final ClientSessionHelper clientSessionHelper; - private final MongoOperationPublisher mongoOperationPublisher; - private final Crypt crypt; + + private final MongoClusterImpl delegate; private final AtomicBoolean closed; public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster, @@ -81,66 +82,72 @@ public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInfo private MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster, @Nullable final OperationExecutor executor, @Nullable final AutoCloseable externalResourceCloser) { - this.settings = notNull("settings", settings); - this.cluster = notNull("cluster", cluster); - this.serverSessionPool = new ServerSessionPool(cluster, settings.getServerApi()); - this.clientSessionHelper = new ClientSessionHelper(this, serverSessionPool); + notNull("settings", settings); + notNull("cluster", cluster); + + TimeoutSettings timeoutSettings = TimeoutSettings.create(settings); + ServerSessionPool serverSessionPool = new ServerSessionPool(cluster, timeoutSettings, settings.getServerApi()); + ClientSessionHelper clientSessionHelper = new ClientSessionHelper(this, serverSessionPool); + AutoEncryptionSettings autoEncryptSettings = settings.getAutoEncryptionSettings(); - this.crypt = autoEncryptSettings != null ? 
Crypts.createCrypt(this, autoEncryptSettings) : null; - if (executor == null) { - this.executor = new OperationExecutorImpl(this, clientSessionHelper); - } else { - this.executor = executor; + Crypt crypt = autoEncryptSettings != null ? Crypts.createCrypt(settings, autoEncryptSettings) : null; + ContextProvider contextProvider = settings.getContextProvider(); + if (contextProvider != null && !(contextProvider instanceof ReactiveContextProvider)) { + throw new IllegalArgumentException("The contextProvider must be an instance of " + + ReactiveContextProvider.class.getName() + " when using the Reactive Streams driver"); } + OperationExecutor operationExecutor = executor != null ? executor + : new OperationExecutorImpl(this, clientSessionHelper, timeoutSettings, (ReactiveContextProvider) contextProvider); + MongoOperationPublisher mongoOperationPublisher = new MongoOperationPublisher<>(Document.class, + withUuidRepresentation(settings.getCodecRegistry(), + settings.getUuidRepresentation()), + settings.getReadPreference(), + settings.getReadConcern(), settings.getWriteConcern(), + settings.getRetryWrites(), settings.getRetryReads(), + settings.getUuidRepresentation(), + settings.getAutoEncryptionSettings(), + timeoutSettings, + operationExecutor); + + this.delegate = new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher); this.externalResourceCloser = externalResourceCloser; - this.mongoOperationPublisher = new MongoOperationPublisher<>(Document.class, - withUuidRepresentation(settings.getCodecRegistry(), - settings.getUuidRepresentation()), - settings.getReadPreference(), - settings.getReadConcern(), settings.getWriteConcern(), - settings.getRetryWrites(), settings.getRetryReads(), - settings.getUuidRepresentation(), - settings.getAutoEncryptionSettings(), - this.executor); + this.settings = settings; this.closed = new AtomicBoolean(); BsonDocument clientMetadataDocument = createClientMetadataDocument(settings.getApplicationName(), mongoDriverInformation); LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings)); } Cluster getCluster() { - return cluster; + return delegate.getCluster(); } public ServerSessionPool getServerSessionPool() { - return serverSessionPool; + return delegate.getServerSessionPool(); } MongoOperationPublisher getMongoOperationPublisher() { - return mongoOperationPublisher; + return delegate.getMongoOperationPublisher(); } @Nullable Crypt getCrypt() { - return crypt; + return delegate.getCrypt(); } public MongoClientSettings getSettings() { return settings; } - @Override - public MongoDatabase getDatabase(final String name) { - return new MongoDatabaseImpl(mongoOperationPublisher.withDatabase(name)); - } - @Override public void close() { if (!closed.getAndSet(true)) { + Crypt crypt = getCrypt(); if (crypt != null) { crypt.close(); } - serverSessionPool.close(); - cluster.close(); + getServerSessionPool().close(); + getCluster().close(); if (externalResourceCloser != null) { try { externalResourceCloser.close(); @@ -153,91 +160,142 @@ public void close() { @Override public Publisher listDatabaseNames() { - return Flux.from(listDatabases().nameOnly(true)).map(d -> d.getString("name")); + return delegate.listDatabaseNames(); } @Override public Publisher listDatabaseNames(final ClientSession clientSession) { - return Flux.from(listDatabases(clientSession).nameOnly(true)).map(d -> d.getString("name")); + return 
delegate.listDatabaseNames(clientSession); } @Override public ListDatabasesPublisher listDatabases() { - return listDatabases(Document.class); + return delegate.listDatabases(); } @Override - public ListDatabasesPublisher listDatabases(final Class clazz) { - return new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(clazz)); + public ListDatabasesPublisher listDatabases(final Class clazz) { + return delegate.listDatabases(clazz); } @Override public ListDatabasesPublisher listDatabases(final ClientSession clientSession) { - return listDatabases(clientSession, Document.class); + return delegate.listDatabases(clientSession); } @Override - public ListDatabasesPublisher listDatabases(final ClientSession clientSession, final Class clazz) { - return new ListDatabasesPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDocumentClass(clazz)); + public ListDatabasesPublisher listDatabases(final ClientSession clientSession, final Class clazz) { + return delegate.listDatabases(clientSession, clazz); } @Override public ChangeStreamPublisher watch() { - return watch(Collections.emptyList()); + return delegate.watch(); } @Override - public ChangeStreamPublisher watch(final Class resultClass) { - return watch(Collections.emptyList(), resultClass); + public ChangeStreamPublisher watch(final Class resultClass) { + return delegate.watch(resultClass); } @Override public ChangeStreamPublisher watch(final List pipeline) { - return watch(pipeline, Document.class); + return delegate.watch(pipeline); } @Override - public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { - return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), - resultClass, pipeline, ChangeStreamLevel.CLIENT); + public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { + return delegate.watch(pipeline, resultClass); } @Override public ChangeStreamPublisher watch(final ClientSession clientSession) { - return watch(clientSession, Collections.emptyList(), Document.class); + return delegate.watch(clientSession); } @Override - public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { - return watch(clientSession, Collections.emptyList(), resultClass); + public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { + return delegate.watch(clientSession, resultClass); } @Override public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline) { - return watch(clientSession, pipeline, Document.class); + return delegate.watch(clientSession, pipeline); } @Override - public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline, - final Class resultClass) { - return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDatabase("admin"), - resultClass, pipeline, ChangeStreamLevel.CLIENT); + public ChangeStreamPublisher watch( + final ClientSession clientSession, final List pipeline, final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); } @Override public Publisher startSession() { - return startSession(ClientSessionOptions.builder().build()); + return delegate.startSession(); } @Override public Publisher startSession(final ClientSessionOptions options) { - notNull("options", options); - return Mono.fromCallable(() -> clientSessionHelper.createClientSession(options, executor)); + return 
delegate.startSession(options); + } + + @Override + public CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return delegate.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return delegate.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return delegate.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return null; + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return delegate.withCodecRegistry(codecRegistry); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return delegate.withReadPreference(readPreference); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return delegate.withWriteConcern(writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return delegate.withReadConcern(readConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return delegate.withTimeout(timeout, timeUnit); + } + + @Override + public MongoDatabase getDatabase(final String name) { + return delegate.getDatabase(name); } @Override public ClusterDescription getClusterDescription() { return getCluster().getCurrentDescription(); } - } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java new file mode 100644 index 00000000000..72bcf53e303 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java @@ -0,0 +1,240 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
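With MongoClientImpl now forwarding its MongoCluster surface to the new MongoClusterImpl delegate, a timeout-scoped view of the whole cluster can be derived without reconfiguring the client. A minimal sketch; the three-second value is arbitrary:

import com.mongodb.reactivestreams.client.MongoClient;
import com.mongodb.reactivestreams.client.MongoCluster;
import com.mongodb.reactivestreams.client.MongoDatabase;

import java.util.concurrent.TimeUnit;

final class ClusterTimeoutSketch {
    static MongoDatabase timedDatabase(final MongoClient client) {
        // withTimeout returns a new MongoCluster view; the original client is left untouched.
        MongoCluster timed = client.withTimeout(3, TimeUnit.SECONDS);
        Long timeoutMs = timed.getTimeout(TimeUnit.MILLISECONDS); // 3000 for this view, null when no timeout is set
        assert timeoutMs != null && timeoutMs == 3000L;
        return timed.getDatabase("test");
    }
}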
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import com.mongodb.reactivestreams.client.MongoCluster; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.internal.crypt.Crypt; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class MongoClusterImpl implements MongoCluster { + + private final Cluster cluster; + private final Crypt crypt; + private final OperationExecutor operationExecutor; + private final ServerSessionPool serverSessionPool; + private final ClientSessionHelper clientSessionHelper; + private final MongoOperationPublisher mongoOperationPublisher; + + MongoClusterImpl(final Cluster cluster, @Nullable final Crypt crypt, final OperationExecutor operationExecutor, + final ServerSessionPool serverSessionPool, final ClientSessionHelper clientSessionHelper, + final MongoOperationPublisher mongoOperationPublisher) { + + this.cluster = cluster; + this.crypt = crypt; + this.operationExecutor = operationExecutor; + this.serverSessionPool = serverSessionPool; + this.clientSessionHelper = clientSessionHelper; + this.mongoOperationPublisher = mongoOperationPublisher; + } + + @Override + public CodecRegistry getCodecRegistry() { + return mongoOperationPublisher.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return mongoOperationPublisher.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return mongoOperationPublisher.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return mongoOperationPublisher.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutMS(); + return timeoutMS != null ? 
MILLISECONDS.convert(timeoutMS, timeUnit) : null; + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withReadPreference(readPreference)); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withWriteConcern(writeConcern)); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withReadConcern(readConcern)); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + + public Cluster getCluster() { + return cluster; + } + + @Nullable + public Crypt getCrypt() { + return crypt; + } + + public ClientSessionHelper getClientSessionHelper() { + return clientSessionHelper; + } + + public ServerSessionPool getServerSessionPool() { + return serverSessionPool; + } + + public MongoOperationPublisher getMongoOperationPublisher() { + return mongoOperationPublisher; + } + + public TimeoutSettings getTimeoutSettings() { + return mongoOperationPublisher.getTimeoutSettings(); + } + + @Override + public Publisher startSession() { + return startSession(ClientSessionOptions.builder().build()); + } + + @Override + public Publisher startSession(final ClientSessionOptions options) { + notNull("options", options); + return Mono.fromCallable(() -> clientSessionHelper.createClientSession(options, operationExecutor)); + } + + + @Override + public MongoDatabase getDatabase(final String name) { + return new MongoDatabaseImpl(mongoOperationPublisher.withDatabase(name)); + } + + @Override + public Publisher listDatabaseNames() { + return Flux.from(listDatabases().nameOnly(true)).map(d -> d.getString("name")); + } + + @Override + public Publisher listDatabaseNames(final ClientSession clientSession) { + return Flux.from(listDatabases(clientSession).nameOnly(true)).map(d -> d.getString("name")); + } + + @Override + public ListDatabasesPublisher listDatabases() { + return listDatabases(Document.class); + } + + @Override + public ListDatabasesPublisher listDatabases(final Class clazz) { + return new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(clazz)); + } + + @Override + public ListDatabasesPublisher listDatabases(final ClientSession clientSession) { + return listDatabases(clientSession, Document.class); + } + + @Override + public ListDatabasesPublisher listDatabases(final ClientSession clientSession, final Class clazz) { + return new ListDatabasesPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDocumentClass(clazz)); + } + + @Override + public ChangeStreamPublisher watch() { + return watch(Collections.emptyList()); + } + + @Override + public ChangeStreamPublisher watch(final Class 
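The with* methods shown above each build a fresh MongoClusterImpl around the same Cluster, executor and session pool, so settings can be layered without mutating shared state. Roughly, on the public API:

import com.mongodb.ReadConcern;
import com.mongodb.ReadPreference;
import com.mongodb.reactivestreams.client.MongoCluster;

import java.util.concurrent.TimeUnit;

final class ClusterViewSketch {
    // Each call returns a new immutable view sharing the underlying Cluster; values are illustrative.
    static MongoCluster strictView(final MongoCluster cluster) {
        return cluster
                .withReadConcern(ReadConcern.MAJORITY)
                .withReadPreference(ReadPreference.secondaryPreferred())
                .withTimeout(1, TimeUnit.SECONDS);
    }
}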
resultClass) { + return watch(Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline) { + return watch(pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { + return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + resultClass, pipeline, ChangeStreamLevel.CLIENT); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession) { + return watch(clientSession, Collections.emptyList(), Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { + return watch(clientSession, Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDatabase("admin"), + resultClass, pipeline, ChangeStreamLevel.CLIENT); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java index d9fa18c6a54..0ac3d6a2e39 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java @@ -62,6 +62,7 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; @@ -105,6 +106,12 @@ public ReadConcern getReadConcern() { return mongoOperationPublisher.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutMS(); + return (timeoutMS != null) ? 
notNull("timeUnit", timeUnit).convert(timeoutMS, TimeUnit.MILLISECONDS) : null; + } + MongoOperationPublisher getPublisherHelper() { return mongoOperationPublisher; } @@ -134,6 +141,11 @@ public MongoCollection withReadConcern(final ReadConcern readConcern) { return new MongoCollectionImpl<>(mongoOperationPublisher.withReadConcern(readConcern)); } + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + @Override public Publisher estimatedDocumentCount() { return estimatedDocumentCount(new EstimatedDocumentCountOptions()); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java index 268b9df8081..f8709f12ad8 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java @@ -38,10 +38,12 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** @@ -82,6 +84,12 @@ public ReadConcern getReadConcern() { return mongoOperationPublisher.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutSettings().getTimeoutMS(); + return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + MongoOperationPublisher getMongoOperationPublisher() { return mongoOperationPublisher; } @@ -106,6 +114,11 @@ public MongoDatabase withReadConcern(final ReadConcern readConcern) { return new MongoDatabaseImpl(mongoOperationPublisher.withReadConcern(readConcern)); } + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoDatabaseImpl(mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + @Override public MongoCollection getCollection(final String collectionName) { return getCollection(collectionName, Document.class); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java index b82bb5b7362..5ccea518cb5 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java @@ -54,6 +54,7 @@ import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.bulk.WriteRequest; import com.mongodb.internal.operation.AsyncOperations; @@ -74,6 +75,8 @@ import java.util.HashMap; import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; @@ -95,22 +98,22 @@ public final class 
MongoOperationPublisher { final Class documentClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, final UuidRepresentation uuidRepresentation, @Nullable final AutoEncryptionSettings autoEncryptionSettings, - final OperationExecutor executor) { + final TimeoutSettings timeoutSettings, final OperationExecutor executor) { this(new MongoNamespace("_ignored", "_ignored"), documentClass, codecRegistry, readPreference, readConcern, writeConcern, retryWrites, retryReads, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } MongoOperationPublisher( final MongoNamespace namespace, final Class documentClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, final UuidRepresentation uuidRepresentation, - @Nullable final AutoEncryptionSettings autoEncryptionSettings, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings, final OperationExecutor executor) { this.operations = new AsyncOperations<>(namespace, notNull("documentClass", documentClass), notNull("readPreference", readPreference), notNull("codecRegistry", codecRegistry), notNull("readConcern", readConcern), notNull("writeConcern", writeConcern), - retryWrites, retryReads); + retryWrites, retryReads, timeoutSettings); this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); this.autoEncryptionSettings = autoEncryptionSettings; this.executor = notNull("executor", executor); @@ -144,6 +147,15 @@ public boolean getRetryReads() { return operations.isRetryReads(); } + @Nullable + public Long getTimeoutMS() { + return getTimeoutSettings().getTimeoutMS(); + } + + public TimeoutSettings getTimeoutSettings() { + return operations.getTimeoutSettings(); + } + Class getDocumentClass() { return operations.getDocumentClass(); } @@ -175,15 +187,15 @@ MongoOperationPublisher withNamespaceAndDocumentClass(final MongoNamespac return (MongoOperationPublisher) this; } return new MongoOperationPublisher<>(notNull("namespace", namespace), notNull("documentClass", documentClass), - getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(), - getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, executor); + getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor); } MongoOperationPublisher withCodecRegistry(final CodecRegistry codecRegistry) { return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), - withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation), - getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), - uuidRepresentation, autoEncryptionSettings, executor); + withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation), + getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor); } MongoOperationPublisher withReadPreference(final ReadPreference readPreference) { @@ -191,9 +203,8 @@ MongoOperationPublisher withReadPreference(final ReadPreference 
readPreferenc return this; } return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), - notNull("readPreference", readPreference), - getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), - uuidRepresentation, autoEncryptionSettings, executor); + notNull("readPreference", readPreference), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor); } MongoOperationPublisher withWriteConcern(final WriteConcern writeConcern) { @@ -201,8 +212,8 @@ MongoOperationPublisher withWriteConcern(final WriteConcern writeConcern) { return this; } return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), getReadPreference(), getReadConcern(), - notNull("writeConcern", writeConcern), - getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, executor); + notNull("writeConcern", writeConcern), getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, + getTimeoutSettings(), executor); } MongoOperationPublisher withReadConcern(final ReadConcern readConcern) { @@ -210,24 +221,39 @@ MongoOperationPublisher withReadConcern(final ReadConcern readConcern) { return this; } return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), - getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern), - getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, - autoEncryptionSettings, executor); + getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern), + getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, + autoEncryptionSettings, getTimeoutSettings(), executor); + } + + MongoOperationPublisher withTimeout(final long timeout, final TimeUnit timeUnit) { + TimeoutSettings timeoutSettings = getTimeoutSettings().withTimeout(timeout, timeUnit); + if (Objects.equals(getTimeoutSettings(), timeoutSettings)) { + return this; + } + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), + getCodecRegistry(), getReadPreference(), getReadConcern(), + getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, + autoEncryptionSettings, timeoutSettings, executor); } Publisher dropDatabase(@Nullable final ClientSession clientSession) { - return createWriteOperationMono(operations::dropDatabase, clientSession); + return createWriteOperationMono(operations::getTimeoutSettings, operations::dropDatabase, clientSession); } Publisher createCollection( @Nullable final ClientSession clientSession, final String collectionName, final CreateCollectionOptions options) { - return createWriteOperationMono(() -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession); } Publisher createView( @Nullable final ClientSession clientSession, final String viewName, final String viewOn, final List pipeline, final CreateViewOptions options) { - return createWriteOperationMono(() -> operations.createView(viewName, viewOn, pipeline, options), clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createView(viewName, viewOn, pipeline, options), clientSession); } public Publisher runCommand( @@ -237,24 +263,30 @@ public Publisher runCommand( return Mono.error(new 
MongoClientException("Read preference in a transaction must be primary")); } return createReadOperationMono( + operations::getTimeoutSettings, () -> operations.commandRead(command, clazz), clientSession, notNull("readPreference", readPreference)); } Publisher estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return createReadOperationMono(() -> operations.estimatedDocumentCount(notNull("options", options)), null); + return createReadOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(options)), + () -> operations.estimatedDocumentCount(notNull("options", options)), null); } Publisher countDocuments(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) { - return createReadOperationMono(() -> operations.countDocuments(notNull("filter", filter), notNull("options", options) + return createReadOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(options)), + () -> operations.countDocuments(notNull("filter", filter), notNull("options", options) ), clientSession); } Publisher bulkWrite( @Nullable final ClientSession clientSession, final List> requests, final BulkWriteOptions options) { - return createWriteOperationMono(() -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)), - clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)), clientSession); } Publisher insertOne(@Nullable final ClientSession clientSession, final T document, final InsertOneOptions options) { @@ -267,8 +299,9 @@ Publisher insertOne(@Nullable final ClientSession clientSession Publisher insertMany( @Nullable final ClientSession clientSession, final List documents, final InsertManyOptions options) { - return createWriteOperationMono(() -> operations.insertMany(notNull("documents", documents), notNull("options", options)), - clientSession) + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.insertMany(notNull("documents", documents), notNull("options", options)), clientSession) .map(INSERT_MANY_RESULT_MAPPER); } @@ -335,15 +368,17 @@ Publisher updateMany( } Publisher findOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { - return createWriteOperationMono(() -> operations.findOneAndDelete(notNull("filter", filter), - notNull("options", options)), - clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndDelete(notNull("filter", filter), notNull("options", options)), clientSession); } Publisher findOneAndReplace( @Nullable final ClientSession clientSession, final Bson filter, final T replacement, final FindOneAndReplaceOptions options) { - return createWriteOperationMono(() -> operations.findOneAndReplace(notNull("filter", filter), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndReplace(notNull("filter", filter), notNull("replacement", replacement), notNull("options", options)), clientSession); @@ -352,7 +387,9 @@ Publisher findOneAndReplace( Publisher findOneAndUpdate( @Nullable final ClientSession clientSession, final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return createWriteOperationMono(() -> operations.findOneAndUpdate(notNull("filter", filter), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> 
operations.findOneAndUpdate(notNull("filter", filter), notNull("update", update), notNull("options", options)), clientSession); @@ -361,14 +398,18 @@ Publisher findOneAndUpdate( Publisher findOneAndUpdate( @Nullable final ClientSession clientSession, final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return createWriteOperationMono(() -> operations.findOneAndUpdate(notNull("filter", filter), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndUpdate(notNull("filter", filter), notNull("update", update), notNull("options", options)), clientSession); } Publisher dropCollection(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { - return createWriteOperationMono(() -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession); } Publisher createIndex(@Nullable final ClientSession clientSession, final Bson key, final IndexOptions options) { @@ -379,8 +420,9 @@ Publisher createIndex(@Nullable final ClientSession clientSession, final Publisher createIndexes( @Nullable final ClientSession clientSession, final List indexes, final CreateIndexOptions options) { - return createWriteOperationMono(() -> operations.createIndexes(notNull("indexes", indexes), - notNull("options", options)), clientSession) + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createIndexes(notNull("indexes", indexes), notNull("options", options)), clientSession) .thenMany(Flux.fromIterable(IndexHelper.getIndexNames(indexes, getCodecRegistry()))); } @@ -392,27 +434,37 @@ Publisher createSearchIndex(@Nullable final String indexName, final Bson } Publisher createSearchIndexes(final List indexes) { - return createWriteOperationMono(() -> operations.createSearchIndexes(indexes), null) + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createSearchIndexes(indexes), null) .thenMany(Flux.fromIterable(IndexHelper.getSearchIndexNames(indexes))); } public Publisher updateSearchIndex(final String name, final Bson definition) { - return createWriteOperationMono(() -> operations.updateSearchIndex(name, definition), null); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.updateSearchIndex(name, definition), null); } public Publisher dropSearchIndex(final String indexName) { - return createWriteOperationMono(() -> operations.dropSearchIndex(indexName), null); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropSearchIndex(indexName), null); } Publisher dropIndex(@Nullable final ClientSession clientSession, final String indexName, final DropIndexOptions options) { - return createWriteOperationMono(() -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)), clientSession); } Publisher dropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { - return createWriteOperationMono(() -> operations.dropIndex(notNull("keys", keys), notNull("options", options)), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> 
operations.dropIndex(notNull("keys", keys), notNull("options", options)), clientSession); } @@ -423,35 +475,45 @@ Publisher dropIndexes(@Nullable final ClientSession clientSession, final D Publisher renameCollection( @Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, final RenameCollectionOptions options) { - return createWriteOperationMono(() -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace), notNull("options", options)), clientSession); } - Mono createReadOperationMono( - final Supplier> operation, - @Nullable final ClientSession clientSession) { - return createReadOperationMono(operation, clientSession, getReadPreference()); + + Mono createReadOperationMono(final Function, TimeoutSettings> timeoutSettingsFunction, + final Supplier> operation, @Nullable final ClientSession clientSession) { + return createReadOperationMono(() -> timeoutSettingsFunction.apply(operations), operation, clientSession, getReadPreference()); } - Mono createReadOperationMono( - final Supplier> operation, - @Nullable final ClientSession clientSession, + + Mono createReadOperationMono(final Supplier timeoutSettingsSupplier, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession, final ReadPreference readPreference) { - AsyncReadOperation readOperation = operation.get(); - return executor.execute(readOperation, readPreference, getReadConcern(), clientSession); + AsyncReadOperation readOperation = operationSupplier.get(); + return getExecutor(timeoutSettingsSupplier.get()) + .execute(readOperation, readPreference, getReadConcern(), clientSession); + } + + Mono createWriteOperationMono(final Function, TimeoutSettings> timeoutSettingsFunction, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession) { + return createWriteOperationMono(() -> timeoutSettingsFunction.apply(operations), operationSupplier, clientSession); } - Mono createWriteOperationMono(final Supplier> operation, @Nullable final ClientSession clientSession) { - AsyncWriteOperation writeOperation = operation.get(); - return executor.execute(writeOperation, getReadConcern(), clientSession); + Mono createWriteOperationMono(final Supplier timeoutSettingsSupplier, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession) { + AsyncWriteOperation writeOperation = operationSupplier.get(); + return getExecutor(timeoutSettingsSupplier.get()) + .execute(writeOperation, getReadConcern(), clientSession); } private Mono createSingleWriteRequestMono( final Supplier> operation, @Nullable final ClientSession clientSession, final WriteRequest.Type type) { - return createWriteOperationMono(operation, clientSession) + return createWriteOperationMono(operations::getTimeoutSettings, operation, clientSession) .onErrorMap(MongoBulkWriteException.class, e -> { MongoException exception; WriteConcernError writeConcernError = e.getWriteConcernError(); @@ -482,6 +544,10 @@ private Mono createSingleWriteRequestMono( }); } + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + private static final Function INSERT_ONE_RESULT_MAPPER = result -> { if (result.wasAcknowledged()) { BsonValue insertedId = result.getInserts().isEmpty() ? 
null : result.getInserts().get(0).getId(); @@ -526,6 +592,3 @@ public static SingleResultCallback sinkToCallback(final MonoSink sink) }; } } - - - diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java index 371168bedd8..dc165e5a5d4 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java @@ -18,6 +18,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.lang.Nullable; @@ -52,4 +53,21 @@ Mono execute(AsyncReadOperation operation, ReadPreference readPreferen * @param the operations result type. */ Mono execute(AsyncWriteOperation operation, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Create a new OperationExecutor with a specific timeout settings + * + * @param timeoutSettings the TimeoutContext to use for the operations + * @return the new operation executor with the set timeout context + * @since 5.2 + */ + OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings); + + /** + * Returns the current timeout settings + * + * @return the timeout settings + * @since 5.2 + */ + TimeoutSettings getTimeoutSettings(); } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java index cb9c37bea8f..1c89ab81d34 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java @@ -15,7 +15,6 @@ */ package com.mongodb.reactivestreams.client.internal; -import com.mongodb.ContextProvider; import com.mongodb.MongoClientException; import com.mongodb.MongoException; import com.mongodb.MongoInternalException; @@ -26,9 +25,12 @@ import com.mongodb.ReadPreference; import com.mongodb.RequestContext; import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding; import com.mongodb.internal.binding.AsyncClusterBinding; import com.mongodb.internal.binding.AsyncReadWriteBinding; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.lang.Nullable; @@ -39,10 +41,13 @@ import org.reactivestreams.Subscriber; import reactor.core.publisher.Mono; +import java.util.Objects; + import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.createTimeoutContext; import static com.mongodb.reactivestreams.client.internal.MongoOperationPublisher.sinkToCallback; /** @@ -52,17 +57,16 @@ public 
class OperationExecutorImpl implements OperationExecutor { private final MongoClientImpl mongoClient; private final ClientSessionHelper clientSessionHelper; + @Nullable private final ReactiveContextProvider contextProvider; + private final TimeoutSettings timeoutSettings; - OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper) { + OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper, + final TimeoutSettings timeoutSettings, @Nullable final ReactiveContextProvider contextProvider) { this.mongoClient = mongoClient; this.clientSessionHelper = clientSessionHelper; - ContextProvider contextProvider = mongoClient.getSettings().getContextProvider(); - if (contextProvider != null && !(contextProvider instanceof ReactiveContextProvider)) { - throw new IllegalArgumentException("The contextProvider must be an instance of " - + ReactiveContextProvider.class.getName() + " when using the Reactive Streams driver"); - } - this.contextProvider = (ReactiveContextProvider) contextProvider; + this.timeoutSettings = timeoutSettings; + this.contextProvider = contextProvider; } @Override @@ -78,10 +82,8 @@ public Mono execute(final AsyncReadOperation operation, final ReadPref return Mono.from(subscriber -> clientSessionHelper.withClientSession(session, this) - .map(clientSession -> getReadWriteBinding(getContext(subscriber), readPreference, readConcern, clientSession, - session == null && clientSession != null)) - .switchIfEmpty(Mono.fromCallable(() -> - getReadWriteBinding(getContext(subscriber), readPreference, readConcern, session, false))) + .map(clientSession -> getReadWriteBinding(getContext(subscriber), + readPreference, readConcern, clientSession, session == null)) .flatMap(binding -> { if (session != null && session.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) { binding.release(); @@ -114,10 +116,8 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon return Mono.from(subscriber -> clientSessionHelper.withClientSession(session, this) - .map(clientSession -> getReadWriteBinding(getContext(subscriber), primary(), readConcern, - clientSession, session == null && clientSession != null)) - .switchIfEmpty(Mono.fromCallable(() -> - getReadWriteBinding(getContext(subscriber), primary(), readConcern, session, false))) + .map(clientSession -> getReadWriteBinding(getContext(subscriber), + primary(), readConcern, clientSession, session == null)) .flatMap(binding -> Mono.create(sink -> operation.executeAsync(binding, (result, t) -> { try { @@ -133,6 +133,19 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon ); } + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) { + if (Objects.equals(timeoutSettings, newTimeoutSettings)) { + return this; + } + return new OperationExecutorImpl(mongoClient, clientSessionHelper, newTimeoutSettings, contextProvider); + } + + @Override + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + private RequestContext getContext(final Subscriber subscriber) { RequestContext context = null; if (contextProvider != null) { @@ -158,11 +171,14 @@ private void unpinServerAddressOnTransientTransactionError(@Nullable final Clien } } - private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestContext, final ReadPreference readPreference, - final ReadConcern readConcern, @Nullable final ClientSession session, final boolean ownsSession) { + 
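// Reviewer sketch (not part of the patch): the new OperationExecutorImpl.withTimeoutSettings above
// follows a copy-on-write idiom -- return the same executor when the settings are unchanged,
// otherwise build a new immutable instance. A minimal standalone illustration of that idiom,
// using a hypothetical ExecutorConfigSketch type rather than the driver's classes:
import java.util.Objects;

final class ExecutorConfigSketch {
    private final Long timeoutMS; // null means "no timeout configured"

    ExecutorConfigSketch(final Long timeoutMS) {
        this.timeoutMS = timeoutMS;
    }

    // Reuse this instance when nothing changes; otherwise return a new immutable copy.
    ExecutorConfigSketch withTimeoutMS(final Long newTimeoutMS) {
        return Objects.equals(timeoutMS, newTimeoutMS) ? this : new ExecutorConfigSketch(newTimeoutMS);
    }

    public static void main(final String[] args) {
        ExecutorConfigSketch base = new ExecutorConfigSketch(null);
        System.out.println(base == base.withTimeoutMS(null));   // true: no allocation for a no-op change
        System.out.println(base.withTimeoutMS(250L).timeoutMS); // 250: per-operation override
    }
}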
private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestContext, + final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session, + final boolean ownsSession) { notNull("readPreference", readPreference); AsyncClusterAwareReadWriteBinding readWriteBinding = new AsyncClusterBinding(mongoClient.getCluster(), - getReadPreferenceForBinding(readPreference, session), readConcern, mongoClient.getSettings().getServerApi(), requestContext); + getReadPreferenceForBinding(readPreference, session), readConcern, + getOperationContext(requestContext, session, readConcern)); + Crypt crypt = mongoClient.getCrypt(); if (crypt != null) { readWriteBinding = new CryptBinding(readWriteBinding, crypt); @@ -176,6 +192,15 @@ private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestCo } } + private OperationContext getOperationContext(final RequestContext requestContext, final ClientSession session, + final ReadConcern readConcern) { + return new OperationContext( + requestContext, + new ReadConcernAwareNoOpSessionContext(readConcern), + createTimeoutContext(session, timeoutSettings), + mongoClient.getSettings().getServerApi()); + } + private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) { if (session == null) { return readPreference; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java new file mode 100644 index 00000000000..bc4da3026a9 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import reactor.core.publisher.Mono; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
        + */ +public final class TimeoutHelper { + private static final String DEFAULT_TIMEOUT_MESSAGE = "Operation exceeded the timeout limit."; + + private TimeoutHelper() { + //NOP + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + @Nullable final Timeout timeout) { + return collectionWithTimeout(collection, timeout, DEFAULT_TIMEOUT_MESSAGE); + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + @Nullable final Timeout timeout, + final String message) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> collection.withTimeout(0, MILLISECONDS), + ms -> collection.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return collection; + } + + public static Mono> collectionWithTimeoutMono(final MongoCollection collection, + @Nullable final Timeout timeout) { + try { + return Mono.just(collectionWithTimeout(collection, timeout)); + } catch (MongoOperationTimeoutException e) { + return Mono.error(e); + } + } + + public static Mono> collectionWithTimeoutDeferred(final MongoCollection collection, + @Nullable final Timeout timeout) { + return Mono.defer(() -> collectionWithTimeoutMono(collection, timeout)); + } + + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + @Nullable final Timeout timeout) { + return databaseWithTimeout(database, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> database.withTimeout(0, MILLISECONDS), + ms -> database.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return database; + } + + private static Mono databaseWithTimeoutMono(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + try { + return Mono.just(databaseWithTimeout(database, message, timeout)); + } catch (MongoOperationTimeoutException e) { + return Mono.error(e); + } + } + + public static Mono databaseWithTimeoutDeferred(final MongoDatabase database, + @Nullable final Timeout timeout) { + return databaseWithTimeoutDeferred(database, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + + public static Mono databaseWithTimeoutDeferred(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + return Mono.defer(() -> databaseWithTimeoutMono(database, message, timeout)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java index 2a4b976c0dc..08df35c00f0 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java @@ -16,21 +16,27 @@ package com.mongodb.reactivestreams.client.internal.crypt; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.MongoClient; import org.bson.BsonDocument; import reactor.core.publisher.Mono; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; class 
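// Reviewer sketch (not part of the patch): TimeoutHelper above resolves the remaining time budget at
// call time -- an infinite budget keeps the resource unconstrained (timeout 0 is treated as "no limit"),
// a positive remainder rescopes the collection/database to the milliseconds left, and an exhausted
// budget fails fast with a timeout error. A simplified standalone illustration with hypothetical
// names, not the driver's internal Timeout API:
import java.util.concurrent.TimeoutException;
import java.util.function.LongFunction;

final class RemainingBudgetSketch {

    // Apply the remaining budget: rescope the resource, or fail fast when the deadline has passed.
    static <T> T withRemaining(final long deadlineNanos, final LongFunction<T> rescope) throws TimeoutException {
        long remainingMs = (deadlineNanos - System.nanoTime()) / 1_000_000L;
        if (remainingMs <= 0) {
            throw new TimeoutException("Operation exceeded the timeout limit.");
        }
        return rescope.apply(remainingMs);
    }

    public static void main(final String[] args) throws Exception {
        long deadline = System.nanoTime() + 200_000_000L; // a 200 ms budget for the whole operation
        String scoped = withRemaining(deadline, ms -> "collection rescoped to " + ms + " ms");
        System.out.println(scoped);
    }
}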
CollectionInfoRetriever { + private static final String TIMEOUT_ERROR_MESSAGE = "Collection information retrieval exceeded the timeout limit."; + private final MongoClient client; CollectionInfoRetriever(final MongoClient client) { this.client = notNull("client", client); } - public Mono filter(final String databaseName, final BsonDocument filter) { - return Mono.from(client.getDatabase(databaseName).listCollections(BsonDocument.class).filter(filter).first()); + public Mono filter(final String databaseName, final BsonDocument filter, @Nullable final Timeout operationTimeout) { + return databaseWithTimeoutDeferred(client.getDatabase(databaseName), TIMEOUT_ERROR_MESSAGE, operationTimeout) + .flatMap(database -> Mono.from(database.listCollections(BsonDocument.class).filter(filter).first())); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java index d1c218cdfe9..0d15f5c970d 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java @@ -19,12 +19,15 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.MongoClientException; import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.crypt.capi.MongoCrypt; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.MongoClient; import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoDatabase; import org.bson.RawBsonDocument; import reactor.core.publisher.Mono; @@ -36,9 +39,11 @@ import static com.mongodb.internal.capi.MongoCryptHelper.createProcessBuilder; import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled; import static com.mongodb.internal.capi.MongoCryptHelper.startProcess; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; @SuppressWarnings("UseOfProcessBuilder") class CommandMarker implements Closeable { + private static final String TIMEOUT_ERROR_MESSAGE = "Command marker exceeded the timeout limit."; @Nullable private final MongoClient client; @Nullable @@ -58,7 +63,6 @@ class CommandMarker implements Closeable { *
 *   <li>The extraOptions.cryptSharedLibRequired option is false.</li>
 * </ul>
 *
 * Then mongocryptd MUST be spawned by the driver.
- *
      */ CommandMarker( final MongoCrypt mongoCrypt, @@ -80,14 +84,14 @@ class CommandMarker implements Closeable { } } - Mono mark(final String databaseName, final RawBsonDocument command) { + Mono mark(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { if (client != null) { - return runCommand(databaseName, command) + return runCommand(databaseName, command, operationTimeout) .onErrorResume(Throwable.class, e -> { - if (processBuilder == null) { + if (processBuilder == null || e instanceof MongoOperationTimeoutException) { throw MongoException.fromThrowable(e); } - return Mono.fromRunnable(() -> startProcess(processBuilder)).then(runCommand(databaseName, command)); + return Mono.fromRunnable(() -> startProcess(processBuilder)).then(runCommand(databaseName, command, operationTimeout)); }) .onErrorMap(t -> new MongoClientException("Exception in encryption library: " + t.getMessage(), t)); } else { @@ -95,12 +99,14 @@ Mono mark(final String databaseName, final RawBsonDocument comm } } - private Mono runCommand(final String databaseName, final RawBsonDocument command) { + private Mono runCommand(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { assertNotNull(client); - return Mono.from(client.getDatabase(databaseName) - .withReadConcern(ReadConcern.DEFAULT) - .withReadPreference(ReadPreference.primary()) - .runCommand(command, RawBsonDocument.class)); + MongoDatabase mongoDatabase = client.getDatabase(databaseName) + .withReadConcern(ReadConcern.DEFAULT) + .withReadPreference(ReadPreference.primary()); + + return databaseWithTimeoutDeferred(mongoDatabase, TIMEOUT_ERROR_MESSAGE, operationTimeout) + .flatMap(database -> Mono.from(database.runCommand(command, RawBsonDocument.class))); } @Override diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java index e34b0571665..6d5aca27457 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java @@ -20,6 +20,7 @@ import com.mongodb.MongoException; import com.mongodb.MongoInternalException; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; @@ -32,6 +33,7 @@ import com.mongodb.internal.capi.MongoCryptHelper; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.MongoClient; import org.bson.BsonBinary; @@ -128,14 +130,14 @@ public class Crypt implements Closeable { * @param databaseName the namespace * @param command the unencrypted command */ - public Mono encrypt(final String databaseName, final RawBsonDocument command) { + public Mono encrypt(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { notNull("databaseName", databaseName); notNull("command", command); if (bypassAutoEncryption) { return Mono.fromCallable(() -> command); } - return executeStateMachine(() -> mongoCrypt.createEncryptionContext(databaseName, command), databaseName); + 
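// Reviewer sketch (not part of the patch): the CommandMarker.mark change above spawns the helper
// process and retries the markup command exactly once, but deliberately skips the retry when the
// first attempt already failed with a timeout. A standalone illustration of that rule with
// hypothetical names:
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

final class RetryUnlessTimeoutSketch {

    // Retry once after starting the helper process, unless the first failure was a timeout.
    static <T> T runWithSingleRetry(final Supplier<T> command, final Runnable spawnHelper) {
        try {
            return command.get();
        } catch (RuntimeException e) {
            if (e.getCause() instanceof TimeoutException) {
                throw e; // a timed-out attempt must not be retried
            }
            spawnHelper.run(); // e.g. start the helper process, then try the command again once
            return command.get();
        }
    }

    public static void main(final String[] args) {
        System.out.println(runWithSingleRetry(() -> "marked command", () -> { }));
    }
}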
return executeStateMachine(() -> mongoCrypt.createEncryptionContext(databaseName, command), databaseName, operationTimeout); } /** @@ -143,9 +145,10 @@ public Mono encrypt(final String databaseName, final RawBsonDoc * * @param commandResponse the encrypted command response */ - public Mono decrypt(final RawBsonDocument commandResponse) { + public Mono decrypt(final RawBsonDocument commandResponse, @Nullable final Timeout operationTimeout) { notNull("commandResponse", commandResponse); - return executeStateMachine(() -> mongoCrypt.createDecryptionContext(commandResponse)).onErrorMap(this::wrapInClientException); + return executeStateMachine(() -> mongoCrypt.createDecryptionContext(commandResponse), operationTimeout) + .onErrorMap(this::wrapInClientException); } /** @@ -154,7 +157,7 @@ public Mono decrypt(final RawBsonDocument commandResponse) { * @param kmsProvider the KMS provider to create the data key for * @param options the data key options */ - public Mono createDataKey(final String kmsProvider, final DataKeyOptions options) { + public Mono createDataKey(final String kmsProvider, final DataKeyOptions options, @Nullable final Timeout operationTimeout) { notNull("kmsProvider", kmsProvider); notNull("options", options); return executeStateMachine(() -> @@ -163,7 +166,7 @@ public Mono createDataKey(final String kmsProvider, final DataK .keyAltNames(options.getKeyAltNames()) .masterKey(options.getMasterKey()) .keyMaterial(options.getKeyMaterial()) - .build())); + .build()), operationTimeout); } /** @@ -172,13 +175,11 @@ public Mono createDataKey(final String kmsProvider, final DataK * @param value the value to encrypt * @param options the options */ - public Mono encryptExplicitly(final BsonValue value, final EncryptOptions options) { - notNull("value", value); - notNull("options", options); - + public Mono encryptExplicitly(final BsonValue value, final EncryptOptions options, @Nullable final Timeout operationTimeout) { return executeStateMachine(() -> - mongoCrypt.createExplicitEncryptionContext(new BsonDocument("v", value), asMongoExplicitEncryptOptions(options)) - ).map(result -> result.getBinary("v")); + mongoCrypt.createExplicitEncryptionContext(new BsonDocument("v", value), asMongoExplicitEncryptOptions(options)), + operationTimeout) + .map(result -> result.getBinary("v")); } /** @@ -190,10 +191,10 @@ public Mono encryptExplicitly(final BsonValue value, final EncryptOp * @since 4.9 * @mongodb.server.release 6.2 */ - @Beta(Beta.Reason.SERVER) - public Mono encryptExpression(final BsonDocument expression, final EncryptOptions options) { + @Beta(Reason.SERVER) + public Mono encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout operationTimeout) { return executeStateMachine(() -> - mongoCrypt.createEncryptExpressionContext(new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options)) + mongoCrypt.createEncryptExpressionContext(new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options)), operationTimeout ).map(result -> result.getDocument("v")); } @@ -202,9 +203,8 @@ public Mono encryptExpression(final BsonDocument expression, final * * @param value the encrypted value */ - public Mono decryptExplicitly(final BsonBinary value) { - notNull("value", value); - return executeStateMachine(() -> mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value))) + public Mono decryptExplicitly(final BsonBinary value, @Nullable final Timeout operationTimeout) { + return executeStateMachine(() -> 
mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value)), operationTimeout) .map(result -> result.get("v")); } @@ -214,14 +214,14 @@ public Mono decryptExplicitly(final BsonBinary value) { * @param options the rewrap many data key options * @return the decrypted value */ - public Mono rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options) { + public Mono rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options, @Nullable final Timeout operationTimeout) { return executeStateMachine(() -> mongoCrypt.createRewrapManyDatakeyContext(filter, MongoRewrapManyDataKeyOptions .builder() .provider(options.getProvider()) .masterKey(options.getMasterKey()) - .build()) + .build()), operationTimeout ); } @@ -240,15 +240,16 @@ public void close() { } } - private Mono executeStateMachine(final Supplier cryptContextSupplier) { - return executeStateMachine(cryptContextSupplier, null); + private Mono executeStateMachine(final Supplier cryptContextSupplier, + @Nullable final Timeout operationTimeout) { + return executeStateMachine(cryptContextSupplier, null, operationTimeout); } private Mono executeStateMachine(final Supplier cryptContextSupplier, - @Nullable final String databaseName) { + @Nullable final String databaseName, @Nullable final Timeout operationTimeout) { try { MongoCryptContext cryptContext = cryptContextSupplier.get(); - return Mono.create(sink -> executeStateMachineWithSink(cryptContext, databaseName, sink)) + return Mono.create(sink -> executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout)) .onErrorMap(this::wrapInClientException) .doFinally(s -> cryptContext.close()); } catch (MongoCryptException e) { @@ -257,23 +258,23 @@ private Mono executeStateMachine(final Supplier sink) { + final MonoSink sink, @Nullable final Timeout operationTimeout) { State state = cryptContext.getState(); switch (state) { case NEED_MONGO_COLLINFO: - collInfo(cryptContext, databaseName, sink); + collInfo(cryptContext, databaseName, sink, operationTimeout); break; case NEED_MONGO_MARKINGS: - mark(cryptContext, databaseName, sink); + mark(cryptContext, databaseName, sink, operationTimeout); break; case NEED_KMS_CREDENTIALS: - fetchCredentials(cryptContext, databaseName, sink); + fetchCredentials(cryptContext, databaseName, sink, operationTimeout); break; case NEED_MONGO_KEYS: - fetchKeys(cryptContext, databaseName, sink); + fetchKeys(cryptContext, databaseName, sink, operationTimeout); break; case NEED_KMS: - decryptKeys(cryptContext, databaseName, sink); + decryptKeys(cryptContext, databaseName, sink, operationTimeout); break; case READY: sink.success(cryptContext.finish()); @@ -287,10 +288,10 @@ private void executeStateMachineWithSink(final MongoCryptContext cryptContext, @ } private void fetchCredentials(final MongoCryptContext cryptContext, @Nullable final String databaseName, - final MonoSink sink) { + final MonoSink sink, @Nullable final Timeout operationTimeout) { try { cryptContext.provideKmsProviderCredentials(MongoCryptHelper.fetchCredentials(kmsProviders, kmsProviderPropertySuppliers)); - executeStateMachineWithSink(cryptContext, databaseName, sink); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); } catch (Exception e) { sink.error(e); } @@ -298,19 +299,19 @@ private void fetchCredentials(final MongoCryptContext cryptContext, @Nullable fi private void collInfo(final MongoCryptContext cryptContext, @Nullable final String databaseName, - final MonoSink sink) { + final MonoSink 
sink, @Nullable final Timeout operationTimeout) { if (collectionInfoRetriever == null) { sink.error(new IllegalStateException("Missing collection Info retriever")); } else if (databaseName == null) { sink.error(new IllegalStateException("Missing database name")); } else { - collectionInfoRetriever.filter(databaseName, cryptContext.getMongoOperation()) + collectionInfoRetriever.filter(databaseName, cryptContext.getMongoOperation(), operationTimeout) .doOnSuccess(result -> { if (result != null) { cryptContext.addMongoOperationResult(result); } cryptContext.completeMongoOperation(); - executeStateMachineWithSink(cryptContext, databaseName, sink); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); }) .doOnError(t -> sink.error(MongoException.fromThrowableNonNull(t))) .subscribe(); @@ -319,17 +320,18 @@ private void collInfo(final MongoCryptContext cryptContext, private void mark(final MongoCryptContext cryptContext, @Nullable final String databaseName, - final MonoSink sink) { + final MonoSink sink, + @Nullable final Timeout operationTimeout) { if (commandMarker == null) { sink.error(wrapInClientException(new MongoInternalException("Missing command marker"))); } else if (databaseName == null) { sink.error(wrapInClientException(new IllegalStateException("Missing database name"))); } else { - commandMarker.mark(databaseName, cryptContext.getMongoOperation()) + commandMarker.mark(databaseName, cryptContext.getMongoOperation(), operationTimeout) .doOnSuccess(result -> { cryptContext.addMongoOperationResult(result); cryptContext.completeMongoOperation(); - executeStateMachineWithSink(cryptContext, databaseName, sink); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); }) .doOnError(e -> sink.error(wrapInClientException(e))) .subscribe(); @@ -338,14 +340,15 @@ private void mark(final MongoCryptContext cryptContext, private void fetchKeys(final MongoCryptContext cryptContext, @Nullable final String databaseName, - final MonoSink sink) { - keyRetriever.find(cryptContext.getMongoOperation()) + final MonoSink sink, + @Nullable final Timeout operationTimeout) { + keyRetriever.find(cryptContext.getMongoOperation(), operationTimeout) .doOnSuccess(results -> { for (BsonDocument result : results) { cryptContext.addMongoOperationResult(result); } cryptContext.completeMongoOperation(); - executeStateMachineWithSink(cryptContext, databaseName, sink); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); }) .doOnError(t -> sink.error(MongoException.fromThrowableNonNull(t))) .subscribe(); @@ -353,16 +356,17 @@ private void fetchKeys(final MongoCryptContext cryptContext, private void decryptKeys(final MongoCryptContext cryptContext, @Nullable final String databaseName, - final MonoSink sink) { + final MonoSink sink, + @Nullable final Timeout operationTimeout) { MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor(); if (keyDecryptor != null) { - keyManagementService.decryptKey(keyDecryptor) - .doOnSuccess(r -> decryptKeys(cryptContext, databaseName, sink)) + keyManagementService.decryptKey(keyDecryptor, operationTimeout) + .doOnSuccess(r -> decryptKeys(cryptContext, databaseName, sink, operationTimeout)) .doOnError(e -> sink.error(wrapInClientException(e))) .subscribe(); } else { Mono.fromRunnable(cryptContext::completeKeyDecryptors) - .doOnSuccess(r -> executeStateMachineWithSink(cryptContext, databaseName, sink)) + .doOnSuccess(r -> executeStateMachineWithSink(cryptContext, databaseName, sink, 
operationTimeout)) .doOnError(e -> sink.error(wrapInClientException(e))) .subscribe(); } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java index ae100283ab8..1dcc8a07d62 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java @@ -17,17 +17,13 @@ package com.mongodb.reactivestreams.client.internal.crypt; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; import com.mongodb.ServerAddress; -import com.mongodb.ServerApi; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding; import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      @@ -58,22 +54,6 @@ public void getWriteConnectionSource(final SingleResultCallback callback) { @@ -144,22 +123,6 @@ public ServerDescription getServerDescription() { return wrapped.getServerDescription(); } - @Override - public SessionContext getSessionContext() { - return wrapped.getSessionContext(); - } - - @Override - @Nullable - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); - } - @Override public OperationContext getOperationContext() { return wrapped.getOperationContext(); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java index 276ad0be146..f7466c14828 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java @@ -20,12 +20,13 @@ import com.mongodb.ReadPreference; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.MessageSettings; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.SplittablePayload; import com.mongodb.internal.connection.SplittablePayloadBsonWriter; +import com.mongodb.internal.time.Timeout; import com.mongodb.internal.validator.MappedFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonBinaryReader; @@ -90,16 +91,17 @@ public ConnectionDescription getDescription() { @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context, final SingleResultCallback callback) { + final OperationContext operationContext, final SingleResultCallback callback) { commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, - context, true, null, null, callback); + operationContext, true, null, null, callback); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context, final boolean responseExpected, @Nullable final SplittablePayload payload, - @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { + final OperationContext operationContext, final boolean responseExpected, + @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, + final SingleResultCallback callback) { if (serverIsLessThanVersionFourDotTwo(wrapped.getDescription())) { callback.onResult(null, new MongoClientException("Auto-encryption requires a minimum MongoDB version of 4.2")); @@ -116,12 +118,14 @@ public void commandAsync(final String database, final BsonDocument command, : new SplittablePayloadBsonWriter(bsonBinaryWriter, bsonOutput, createSplittablePayloadMessageSettings(), payload, MAX_SPLITTABLE_DOCUMENT_SIZE); + Timeout operationTimeout = 
operationContext.getTimeoutContext().getTimeout(); + getEncoder(command).encode(writer, command, EncoderContext.builder().build()); - crypt.encrypt(database, new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize())) + crypt.encrypt(database, new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout) .flatMap((Function>) encryptedCommand -> Mono.create(sink -> wrapped.commandAsync(database, encryptedCommand, commandFieldNameValidator, readPreference, - new RawBsonDocumentCodec(), context, responseExpected, null, null, sinkToCallback(sink)))) - .flatMap(crypt::decrypt) + new RawBsonDocumentCodec(), operationContext, responseExpected, null, null, sinkToCallback(sink)))) + .flatMap(rawBsonDocument -> crypt.decrypt(rawBsonDocument, operationTimeout)) .map(decryptedResponse -> commandResultDecoder.decode(new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO()), DecoderContext.builder().build()) diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java index 0e493f8c364..d59b1e03696 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java @@ -25,7 +25,6 @@ import com.mongodb.crypt.capi.MongoCrypts; import com.mongodb.reactivestreams.client.MongoClient; import com.mongodb.reactivestreams.client.MongoClients; -import com.mongodb.reactivestreams.client.internal.MongoClientImpl; import javax.net.ssl.SSLContext; import java.security.NoSuchAlgorithmException; @@ -41,11 +40,11 @@ public final class Crypts { private Crypts() { } - public static Crypt createCrypt(final MongoClientImpl client, final AutoEncryptionSettings settings) { + public static Crypt createCrypt(final MongoClientSettings mongoClientSettings, final AutoEncryptionSettings autoEncryptionSettings) { MongoClient sharedInternalClient = null; - MongoClientSettings keyVaultMongoClientSettings = settings.getKeyVaultMongoClientSettings(); - if (keyVaultMongoClientSettings == null || !settings.isBypassAutoEncryption()) { - MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(client.getSettings()) + MongoClientSettings keyVaultMongoClientSettings = autoEncryptionSettings.getKeyVaultMongoClientSettings(); + if (keyVaultMongoClientSettings == null || !autoEncryptionSettings.isBypassAutoEncryption()) { + MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(mongoClientSettings) .applyToConnectionPoolSettings(builder -> builder.minSize(0)) .autoEncryptionSettings(null) .build(); @@ -53,16 +52,16 @@ public static Crypt createCrypt(final MongoClientImpl client, final AutoEncrypti } MongoClient keyVaultClient = keyVaultMongoClientSettings == null ? sharedInternalClient : MongoClients.create(keyVaultMongoClientSettings); - MongoCrypt mongoCrypt = MongoCrypts.create(createMongoCryptOptions(settings)); + MongoCrypt mongoCrypt = MongoCrypts.create(createMongoCryptOptions(autoEncryptionSettings)); return new Crypt( mongoCrypt, - createKeyRetriever(keyVaultClient, settings.getKeyVaultNamespace()), - createKeyManagementService(settings.getKmsProviderSslContextMap()), - settings.getKmsProviders(), - settings.getKmsProviderPropertySuppliers(), - settings.isBypassAutoEncryption(), - settings.isBypassAutoEncryption() ? 
null : new CollectionInfoRetriever(sharedInternalClient), - new CommandMarker(mongoCrypt, settings), + createKeyRetriever(keyVaultClient, autoEncryptionSettings.getKeyVaultNamespace()), + createKeyManagementService(autoEncryptionSettings.getKmsProviderSslContextMap()), + autoEncryptionSettings.getKmsProviders(), + autoEncryptionSettings.getKmsProviderPropertySuppliers(), + autoEncryptionSettings.isBypassAutoEncryption(), + autoEncryptionSettings.isBypassAutoEncryption() ? null : new CollectionInfoRetriever(sharedInternalClient), + new CommandMarker(mongoCrypt, autoEncryptionSettings), sharedInternalClient, keyVaultClient); } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java index 887129b24e1..465ffc02e80 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java @@ -16,41 +16,53 @@ package com.mongodb.reactivestreams.client.internal.crypt; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoSocketWriteTimeoutException; import com.mongodb.ServerAddress; import com.mongodb.connection.AsyncCompletionHandler; import com.mongodb.connection.SocketSettings; import com.mongodb.connection.SslSettings; import com.mongodb.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.AsynchronousChannelStream; import com.mongodb.internal.connection.DefaultInetAddressResolver; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.Stream; import com.mongodb.internal.connection.StreamFactory; import com.mongodb.internal.connection.TlsChannelStreamFactoryFactory; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.ByteBuf; import org.bson.ByteBufNIO; +import org.jetbrains.annotations.NotNull; import reactor.core.publisher.Mono; import reactor.core.publisher.MonoSink; import javax.net.ssl.SSLContext; import java.io.Closeable; import java.nio.channels.CompletionHandler; +import java.nio.channels.InterruptedByTimeoutException; import java.util.List; import java.util.Map; import static java.util.Collections.singletonList; import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.assertions.Assertions.assertTrue; class KeyManagementService implements Closeable { private static final Logger LOGGER = Loggers.getLogger("client"); + private static final String TIMEOUT_ERROR_MESSAGE = "KMS key decryption exceeded the timeout limit."; private final Map kmsProviderSslContextMap; private final int timeoutMillis; private final TlsChannelStreamFactoryFactory tlsChannelStreamFactoryFactory; KeyManagementService(final Map kmsProviderSslContextMap, final int timeoutMillis) { + assertTrue("timeoutMillis > 0", timeoutMillis > 0); this.kmsProviderSslContextMap = kmsProviderSslContextMap; this.tlsChannelStreamFactoryFactory = new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver()); this.timeoutMillis = timeoutMillis; @@ -60,7 
+72,7 @@ public void close() { tlsChannelStreamFactoryFactory.close(); } - Mono decryptKey(final MongoKeyDecryptor keyDecryptor) { + Mono decryptKey(final MongoKeyDecryptor keyDecryptor, @Nullable final Timeout operationTimeout) { SocketSettings socketSettings = SocketSettings.builder() .connectTimeout(timeoutMillis, MILLISECONDS) .readTimeout(timeoutMillis, MILLISECONDS) @@ -74,43 +86,47 @@ Mono decryptKey(final MongoKeyDecryptor keyDecryptor) { return Mono.create(sink -> { Stream stream = streamFactory.create(serverAddress); - stream.openAsync(new AsyncCompletionHandler() { + OperationContext operationContext = createOperationContext(operationTimeout, socketSettings); + stream.openAsync(operationContext, new AsyncCompletionHandler() { @Override public void completed(@Nullable final Void ignored) { - streamWrite(stream, keyDecryptor, sink); + streamWrite(stream, keyDecryptor, operationContext, sink); } @Override public void failed(final Throwable t) { stream.close(); - sink.error(t); + handleError(t, operationContext, sink); } }); }).onErrorMap(this::unWrapException); } - private void streamWrite(final Stream stream, final MongoKeyDecryptor keyDecryptor, final MonoSink sink) { + private void streamWrite(final Stream stream, final MongoKeyDecryptor keyDecryptor, + final OperationContext operationContext, final MonoSink sink) { List byteBufs = singletonList(new ByteBufNIO(keyDecryptor.getMessage())); - stream.writeAsync(byteBufs, new AsyncCompletionHandler() { + stream.writeAsync(byteBufs, operationContext, new AsyncCompletionHandler() { @Override public void completed(@Nullable final Void aVoid) { - streamRead(stream, keyDecryptor, sink); + streamRead(stream, keyDecryptor, operationContext, sink); } @Override public void failed(final Throwable t) { stream.close(); - sink.error(t); + handleError(t, operationContext, sink); } }); } - private void streamRead(final Stream stream, final MongoKeyDecryptor keyDecryptor, final MonoSink sink) { + private void streamRead(final Stream stream, final MongoKeyDecryptor keyDecryptor, + final OperationContext operationContext, final MonoSink sink) { int bytesNeeded = keyDecryptor.bytesNeeded(); if (bytesNeeded > 0) { AsynchronousChannelStream asyncStream = (AsynchronousChannelStream) stream; ByteBuf buffer = asyncStream.getBuffer(bytesNeeded); - asyncStream.getChannel().read(buffer.asNIO(), asyncStream.getSettings().getReadTimeout(MILLISECONDS), MILLISECONDS, null, + long readTimeoutMS = operationContext.getTimeoutContext().getReadTimeoutMS(); + asyncStream.getChannel().read(buffer.asNIO(), readTimeoutMS, MILLISECONDS, null, new CompletionHandler() { @Override @@ -119,7 +135,7 @@ public void completed(final Integer integer, final Void aVoid) { try { keyDecryptor.feed(buffer.asNIO()); buffer.release(); - streamRead(stream, keyDecryptor, sink); + streamRead(stream, keyDecryptor, operationContext, sink); } catch (Throwable t) { sink.error(t); } @@ -129,7 +145,7 @@ public void completed(final Integer integer, final Void aVoid) { public void failed(final Throwable t, final Void aVoid) { buffer.release(); stream.close(); - sink.error(t); + handleError(t, operationContext, sink); } }); } else { @@ -138,7 +154,49 @@ public void failed(final Throwable t, final Void aVoid) { } } + private static void handleError(final Throwable t, final OperationContext operationContext, final MonoSink sink) { + if (isTimeoutException(t) && operationContext.getTimeoutContext().hasTimeoutMS()) { + sink.error(TimeoutContext.createMongoTimeoutException(TIMEOUT_ERROR_MESSAGE, t)); + } 
else { + sink.error(t); + } + } + + private OperationContext createOperationContext(@Nullable final Timeout operationTimeout, final SocketSettings socketSettings) { + TimeoutSettings timeoutSettings; + if (operationTimeout == null) { + timeoutSettings = createTimeoutSettings(socketSettings, null); + } else { + timeoutSettings = operationTimeout.call(MILLISECONDS, + () -> { + throw new AssertionError("operationTimeout cannot be infinite"); + }, + (ms) -> createTimeoutSettings(socketSettings, ms), + () -> { + throw new MongoOperationTimeoutException(TIMEOUT_ERROR_MESSAGE); + }); + } + return OperationContext.simpleOperationContext(new TimeoutContext(timeoutSettings)); + } + + @NotNull + private static TimeoutSettings createTimeoutSettings(final SocketSettings socketSettings, + @Nullable final Long ms) { + return new TimeoutSettings( + 0, + socketSettings.getConnectTimeout(MILLISECONDS), + socketSettings.getReadTimeout(MILLISECONDS), + ms, + 0); + } + private Throwable unWrapException(final Throwable t) { return t instanceof MongoSocketException ? t.getCause() : t; } + + private static boolean isTimeoutException(final Throwable t) { + return t instanceof MongoSocketReadTimeoutException + || t instanceof MongoSocketWriteTimeoutException + || t instanceof InterruptedByTimeoutException; + } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java index 74dca9e6f60..23e3a06eff0 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java @@ -18,7 +18,10 @@ import com.mongodb.MongoNamespace; import com.mongodb.ReadConcern; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCollection; import org.bson.BsonDocument; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -26,8 +29,10 @@ import java.util.List; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; class KeyRetriever { + private static final String TIMEOUT_ERROR_MESSAGE = "Key retrieval exceeded the timeout limit."; private final MongoClient client; private final MongoNamespace namespace; @@ -36,11 +41,14 @@ class KeyRetriever { this.namespace = notNull("namespace", namespace); } - public Mono> find(final BsonDocument keyFilter) { - return Flux.from( - client.getDatabase(namespace.getDatabaseName()).getCollection(namespace.getCollectionName(), BsonDocument.class) - .withReadConcern(ReadConcern.MAJORITY) - .find(keyFilter) - ).collectList(); + public Mono> find(final BsonDocument keyFilter, @Nullable final Timeout operationTimeout) { + return Flux.defer(() -> { + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class); + + return collectionWithTimeout(collection, operationTimeout, TIMEOUT_ERROR_MESSAGE) + .withReadConcern(ReadConcern.MAJORITY) + .find(keyFilter); + }).collectList(); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java 
b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java index d92f68154dc..1e81db2045e 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java @@ -22,6 +22,7 @@ import com.mongodb.client.gridfs.model.GridFSDownloadOptions; import com.mongodb.client.gridfs.model.GridFSFile; import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.internal.time.Timeout; import com.mongodb.reactivestreams.client.ClientSession; import com.mongodb.reactivestreams.client.MongoClients; import com.mongodb.reactivestreams.client.MongoCollection; @@ -39,6 +40,8 @@ import org.reactivestreams.Publisher; import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createDeletePublisher; @@ -47,6 +50,7 @@ import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSFindPublisher; import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSUploadPublisher; import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createRenamePublisher; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; @@ -72,7 +76,7 @@ public GridFSBucketImpl(final MongoDatabase database, final String bucketName) { getChunksCollection(database, bucketName)); } - GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection filesCollection, + private GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection filesCollection, final MongoCollection chunksCollection) { this.bucketName = notNull("bucketName", bucketName); this.chunkSizeBytes = chunkSizeBytes; @@ -115,6 +119,12 @@ public ReadConcern getReadConcern() { return filesCollection.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = filesCollection.getTimeout(MILLISECONDS); + return timeoutMS == null ? 
null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + @Override public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) { return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection, chunksCollection); @@ -141,6 +151,12 @@ public GridFSBucket withReadConcern(final ReadConcern readConcern) { chunksCollection.withReadConcern(readConcern)); } + @Override + public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withTimeout(timeout, timeUnit), + chunksCollection.withTimeout(timeout, timeUnit)); + } + @Override public GridFSUploadPublisher uploadFromPublisher(final String filename, final Publisher source) { return uploadFromPublisher(filename, source, new GridFSUploadOptions()); @@ -202,8 +218,10 @@ public GridFSDownloadPublisher downloadToPublisher(final ObjectId id) { @Override public GridFSDownloadPublisher downloadToPublisher(final BsonValue id) { - return createGridFSDownloadPublisher(chunksCollection, null, - createGridFSFindPublisher(filesCollection, null, new BsonDocument("_id", id))); + + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, null, new BsonDocument("_id", id), operationTimeout); + return createGridFSDownloadPublisher(chunksCollection, null, findPublisherCreator); } @Override @@ -213,8 +231,9 @@ public GridFSDownloadPublisher downloadToPublisher(final String filename) { @Override public GridFSDownloadPublisher downloadToPublisher(final String filename, final GridFSDownloadOptions options) { - return createGridFSDownloadPublisher(chunksCollection, null, - createGridFSFindPublisher(filesCollection, null, filename, options)); + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, null, filename, options, operationTimeout); + return createGridFSDownloadPublisher(chunksCollection, null, findPublisherCreator); } @Override @@ -224,8 +243,9 @@ public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSes @Override public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, final BsonValue id) { - return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), - createGridFSFindPublisher(filesCollection, clientSession, new BsonDocument("_id", id))); + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, clientSession, new BsonDocument("_id", id), operationTimeout); + return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), findPublisherCreator); } @Override @@ -237,8 +257,11 @@ public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSes public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, final String filename, final GridFSDownloadOptions options) { - return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), - createGridFSFindPublisher(filesCollection, clientSession, filename, options)); + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, clientSession, filename, + options, operationTimeout); + + return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), findPublisherCreator); } @Override diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java 
b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java index e80d5dc3902..bedc6552957 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java @@ -18,11 +18,13 @@ import com.mongodb.MongoGridFSException; import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; import com.mongodb.reactivestreams.client.FindPublisher; import com.mongodb.reactivestreams.client.MongoCollection; import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; import org.bson.Document; import org.bson.types.Binary; import org.reactivestreams.Publisher; @@ -35,30 +37,32 @@ import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.startTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      */ public class GridFSDownloadPublisherImpl implements GridFSDownloadPublisher { + private static final String TIMEOUT_ERROR_MESSAGE = "Finding chunks exceeded the timeout limit."; private final ClientSession clientSession; - private final Mono gridFSFileMono; + private final Function gridFSFileMono; private final MongoCollection chunksCollection; private Integer bufferSizeBytes; private volatile GridFSFile fileInfo; + @Nullable + private final Long timeoutMs; public GridFSDownloadPublisherImpl(@Nullable final ClientSession clientSession, - final Mono gridFSFileMono, + final Function gridFSFilePublisherCreator, final MongoCollection chunksCollection) { this.clientSession = clientSession; - this.gridFSFileMono = notNull("gridFSFileMono", gridFSFileMono) - .doOnSuccess(s -> { - if (s == null) { - throw new MongoGridFSException("File not found"); - } - }); + this.gridFSFileMono = notNull("gridFSFilePublisherCreator", gridFSFilePublisherCreator); this.chunksCollection = notNull("chunksCollection", chunksCollection); + this.timeoutMs = chunksCollection.getTimeout(MILLISECONDS); } @Override @@ -66,7 +70,8 @@ public Publisher getGridFSFile() { if (fileInfo != null) { return Mono.fromCallable(() -> fileInfo); } - return gridFSFileMono.doOnNext(i -> fileInfo = i); + return Mono.from(gridFSFileMono.apply(startTimeout(timeoutMs))) + .doOnNext(gridFSFile -> fileInfo = gridFSFile); } @Override @@ -77,17 +82,25 @@ public GridFSDownloadPublisher bufferSizeBytes(final int bufferSizeBytes) { @Override public void subscribe(final Subscriber subscriber) { - gridFSFileMono.flatMapMany((Function>) this::getChunkPublisher) - .subscribe(subscriber); + Flux.defer(()-> { + Timeout operationTimeout = startTimeout(timeoutMs); + return Mono.from(gridFSFileMono.apply(operationTimeout)) + .doOnSuccess(gridFSFile -> { + if (gridFSFile == null) { + throw new MongoGridFSException("File not found"); + } + fileInfo = gridFSFile; + }).flatMapMany((Function>) gridFSFile -> getChunkPublisher(gridFSFile, operationTimeout)); + }).subscribe(subscriber); } - private Flux getChunkPublisher(final GridFSFile gridFSFile) { + private Flux getChunkPublisher(final GridFSFile gridFSFile, @Nullable final Timeout timeout) { Document filter = new Document("files_id", gridFSFile.getId()); FindPublisher chunkPublisher; if (clientSession != null) { - chunkPublisher = chunksCollection.find(clientSession, filter); + chunkPublisher = collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).find(clientSession, filter); } else { - chunkPublisher = chunksCollection.find(filter); + chunkPublisher = collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).find(filter); } AtomicInteger chunkCounter = new AtomicInteger(0); @@ -126,5 +139,4 @@ private Flux getChunkPublisher(final GridFSFile gridFSFile) { }); return bufferSizeBytes == null ? 
byteBufferFlux : new ResizingByteBufferFlux(byteBufferFlux, bufferSizeBytes); } - } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java index 4b2878d72e3..166abca6a0b 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java @@ -17,11 +17,13 @@ package com.mongodb.reactivestreams.client.internal.gridfs; import com.mongodb.MongoGridFSException; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.gridfs.model.GridFSDownloadOptions; import com.mongodb.client.gridfs.model.GridFSFile; import com.mongodb.client.gridfs.model.GridFSUploadOptions; -import com.mongodb.client.result.DeleteResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; import com.mongodb.reactivestreams.client.FindPublisher; @@ -36,9 +38,14 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutMono; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred; import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
 */
      @@ -68,10 +75,10 @@ public static GridFSUploadPublisherImpl createGridFSUploadPublisher( public static GridFSDownloadPublisherImpl createGridFSDownloadPublisher( final MongoCollection chunksCollection, @Nullable final ClientSession clientSession, - final GridFSFindPublisher publisher) { + final Function publisher) { notNull("chunksCollection", chunksCollection); - notNull("publisher", publisher); - return new GridFSDownloadPublisherImpl(clientSession, Mono.from(publisher), chunksCollection); + notNull("gridFSFileMono", publisher); + return new GridFSDownloadPublisherImpl(clientSession, publisher, chunksCollection); } public static GridFSFindPublisher createGridFSFindPublisher( @@ -82,11 +89,21 @@ public static GridFSFindPublisher createGridFSFindPublisher( return new GridFSFindPublisherImpl(createFindPublisher(filesCollection, clientSession, filter)); } + public static GridFSFindPublisher createGridFSFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + @Nullable final Bson filter, + @Nullable final Timeout operationTimeout) { + notNull("filesCollection", filesCollection); + return new GridFSFindPublisherImpl(createFindPublisher(filesCollection, clientSession, filter, operationTimeout)); + } + public static GridFSFindPublisher createGridFSFindPublisher( final MongoCollection filesCollection, @Nullable final ClientSession clientSession, final String filename, - final GridFSDownloadOptions options) { + final GridFSDownloadOptions options, + @Nullable final Timeout operationTimeout) { notNull("filesCollection", filesCollection); notNull("filename", filename); notNull("options", options); @@ -102,10 +119,32 @@ public static GridFSFindPublisher createGridFSFindPublisher( sort = -1; } - return createGridFSFindPublisher(filesCollection, clientSession, new Document("filename", filename)).skip(skip) + return createGridFSFindPublisher(filesCollection, clientSession, new Document("filename", filename), operationTimeout).skip(skip) .sort(new Document("uploadDate", sort)); } + public static FindPublisher createFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + @Nullable final Bson filter, + @Nullable final Timeout operationTimeout) { + notNull("filesCollection", filesCollection); + FindPublisher publisher; + if (clientSession == null) { + publisher = collectionWithTimeout(filesCollection, operationTimeout).find(); + } else { + publisher = collectionWithTimeout(filesCollection, operationTimeout).find(clientSession); + } + + if (filter != null) { + publisher = publisher.filter(filter); + } + if (operationTimeout != null) { + publisher.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } + return publisher; + } + public static FindPublisher createFindPublisher( final MongoCollection filesCollection, @Nullable final ClientSession clientSession, @@ -117,10 +156,12 @@ public static FindPublisher createFindPublisher( } else { publisher = filesCollection.find(clientSession); } - if (filter != null) { publisher = publisher.filter(filter); } + if (filesCollection.getTimeout(MILLISECONDS) != null) { + publisher.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } return publisher; } @@ -132,24 +173,29 @@ public static Publisher createDeletePublisher(final MongoCollection fileDeletePublisher; - if (clientSession == null) { - fileDeletePublisher = filesCollection.deleteOne(filter); - } else { - fileDeletePublisher = filesCollection.deleteOne(clientSession, filter); - } - return Mono.from(fileDeletePublisher) - 
.flatMap(deleteResult -> { + + return Mono.defer(()-> { + Timeout operationTimeout = startTimeout(filesCollection.getTimeout(MILLISECONDS)); + return collectionWithTimeoutMono(filesCollection, operationTimeout) + .flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.deleteOne(filter)); + } else { + return Mono.from(wrappedCollection.deleteOne(clientSession, filter)); + } + }).flatMap(deleteResult -> { if (deleteResult.wasAcknowledged() && deleteResult.getDeletedCount() == 0) { - throw new MongoGridFSException(format("No file found with the ObjectId: %s", id)); + return Mono.error(new MongoGridFSException(format("No file found with the ObjectId: %s", id))); } + return collectionWithTimeoutMono(chunksCollection, operationTimeout); + }).flatMap(wrappedCollection -> { if (clientSession == null) { - return Mono.from(chunksCollection.deleteMany(new BsonDocument("files_id", id))); + return Mono.from(wrappedCollection.deleteMany(new BsonDocument("files_id", id))); } else { - return Mono.from(chunksCollection.deleteMany(clientSession, new BsonDocument("files_id", id))); + return Mono.from(wrappedCollection.deleteMany(clientSession, new BsonDocument("files_id", id))); } - }) - .flatMap(i -> Mono.empty()); + }).then(); + }); } public static Publisher createRenamePublisher(final MongoCollection filesCollection, @@ -180,20 +226,30 @@ public static Publisher createRenamePublisher(final MongoCollection createDropPublisher(final MongoCollection filesCollection, final MongoCollection chunksCollection, @Nullable final ClientSession clientSession) { - Publisher filesDropPublisher; - if (clientSession == null) { - filesDropPublisher = filesCollection.drop(); - } else { - filesDropPublisher = filesCollection.drop(clientSession); - } - Publisher chunksDropPublisher; - if (clientSession == null) { - chunksDropPublisher = chunksCollection.drop(); - } else { - chunksDropPublisher = chunksCollection.drop(clientSession); - } + return Mono.defer(() -> { + Timeout operationTimeout = startTimeout(filesCollection.getTimeout(MILLISECONDS)); + return collectionWithTimeoutMono(filesCollection, operationTimeout) + .flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.drop()); + } else { + return Mono.from(wrappedCollection.drop(clientSession)); + } + }).then(collectionWithTimeoutDeferred(chunksCollection, operationTimeout)) + .flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.drop()); + } else { + return Mono.from(wrappedCollection.drop(clientSession)); + } + + }); + }); + } - return Mono.from(filesDropPublisher).then(Mono.from(chunksDropPublisher)); + @Nullable + private static Timeout startTimeout(@Nullable final Long timeoutMs) { + return timeoutMs == null ? 
null : TimeoutContext.startTimeout(timeoutMs); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java index da6cbdcbce8..a45d369c676 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java @@ -17,13 +17,14 @@ package com.mongodb.reactivestreams.client.internal.gridfs; import com.mongodb.MongoGridFSException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.client.gridfs.model.GridFSFile; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.result.InsertOneResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; -import com.mongodb.reactivestreams.client.FindPublisher; -import com.mongodb.reactivestreams.client.ListIndexesPublisher; import com.mongodb.reactivestreams.client.MongoCollection; import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher; import org.bson.BsonValue; @@ -41,11 +42,14 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; import java.util.function.Function; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred; +import static java.time.Duration.ofMillis; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** @@ -53,6 +57,7 @@ */ public final class GridFSUploadPublisherImpl implements GridFSUploadPublisher { + private static final String TIMEOUT_ERROR_MESSAGE = "Saving chunks exceeded the timeout limit."; private static final Document PROJECTION = new Document("_id", 1); private static final Document FILES_INDEX = new Document("filename", 1).append("uploadDate", 1); private static final Document CHUNKS_INDEX = new Document("files_id", 1).append("n", 1); @@ -64,6 +69,8 @@ public final class GridFSUploadPublisherImpl implements GridFSUploadPublisher source; + @Nullable + private final Long timeoutMs; public GridFSUploadPublisherImpl(@Nullable final ClientSession clientSession, final MongoCollection filesCollection, @@ -81,6 +88,7 @@ public GridFSUploadPublisherImpl(@Nullable final ClientSession clientSession, this.chunkSizeBytes = chunkSizeBytes; this.metadata = metadata; this.source = source; + this.timeoutMs = filesCollection.getTimeout(MILLISECONDS); } @Override @@ -98,31 +106,23 @@ public BsonValue getId() { @Override public void subscribe(final Subscriber s) { - Mono.create(sink -> { + Mono.defer(() -> { AtomicBoolean terminated = new AtomicBoolean(false); - sink.onCancel(() -> createCancellationMono(terminated).subscribe()); - - Consumer errorHandler = e -> createCancellationMono(terminated) - .doOnError(i -> sink.error(e)) - .doOnSuccess(i -> sink.error(e)) - .subscribe(); - - Consumer saveFileDataMono = l -> createSaveFileDataMono(terminated, l) - .doOnError(errorHandler) - .doOnSuccess(i -> sink.success()) - 
.subscribe(); - - Consumer saveChunksMono = i -> createSaveChunksMono(terminated) - .doOnError(errorHandler) - .doOnSuccess(saveFileDataMono) - .subscribe(); - - createCheckAndCreateIndexesMono() - .doOnError(errorHandler) - .doOnSuccess(saveChunksMono) - .subscribe(); - }) - .subscribe(s); + Timeout timeout = TimeoutContext.startTimeout(timeoutMs); + return createCheckAndCreateIndexesMono(timeout) + .then(createSaveChunksMono(terminated, timeout)) + .flatMap(lengthInBytes -> createSaveFileDataMono(terminated, lengthInBytes, timeout)) + .onErrorResume(originalError -> + createCancellationMono(terminated, timeout) + .onErrorMap(cancellationError -> { + // Timeout exception might occur during cancellation. It gets suppressed. + originalError.addSuppressed(cancellationError); + return originalError; + }) + .then(Mono.error(originalError))) + .doOnCancel(() -> createCancellationMono(terminated, timeout).subscribe()) + .then(); + }).subscribe(s); } public GridFSUploadPublisher withObjectId() { @@ -148,47 +148,50 @@ public void subscribe(final Subscriber subscriber) { }; } - private Mono createCheckAndCreateIndexesMono() { - MongoCollection collection = filesCollection.withDocumentClass(Document.class).withReadPreference(primary()); - FindPublisher findPublisher; - if (clientSession != null) { - findPublisher = collection.find(clientSession); - } else { - findPublisher = collection.find(); - } + private Mono createCheckAndCreateIndexesMono(@Nullable final Timeout timeout) { AtomicBoolean collectionExists = new AtomicBoolean(false); - - return Mono.create(sink -> Mono.from(findPublisher.projection(PROJECTION).first()) - .subscribe( + return Mono.create(sink -> findAllInCollection(filesCollection, timeout).subscribe( d -> collectionExists.set(true), sink::error, () -> { if (collectionExists.get()) { sink.success(); } else { - checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX) - .doOnError(sink::error) - .doOnSuccess(i -> { - checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX) - .doOnError(sink::error) - .doOnSuccess(sink::success) - .subscribe(); - }) - .subscribe(); + checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX, timeout) + .doOnSuccess(i -> checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX, timeout) + .subscribe(unused -> {}, sink::error, sink::success)) + .subscribe(unused -> {}, sink::error); } }) ); } - private Mono hasIndex(final MongoCollection collection, final Document index) { - ListIndexesPublisher listIndexesPublisher; - if (clientSession != null) { - listIndexesPublisher = collection.listIndexes(clientSession); - } else { - listIndexesPublisher = collection.listIndexes(); - } + private Mono findAllInCollection(final MongoCollection collection, @Nullable final Timeout timeout) { + return collectionWithTimeoutDeferred(collection + .withDocumentClass(Document.class) + .withReadPreference(primary()), timeout) + .flatMap(wrappedCollection -> { + if (clientSession != null) { + return Mono.from(wrappedCollection.find(clientSession) + .projection(PROJECTION) + .first()); + } else { + return Mono.from(wrappedCollection.find() + .projection(PROJECTION) + .first()); + } + }); + } - return Flux.from(listIndexesPublisher) + private Mono hasIndex(final MongoCollection collection, final Document index, @Nullable final Timeout timeout) { + return collectionWithTimeoutDeferred(collection, timeout) + .map(wrappedCollection -> { + if (clientSession != null) { + return 
wrappedCollection.listIndexes(clientSession); + } else { + return wrappedCollection.listIndexes(); + } + }).flatMapMany(Flux::from) .collectList() .map(indexes -> { boolean hasIndex = false; @@ -208,25 +211,28 @@ private Mono hasIndex(final MongoCollection collection, final Do }); } - private Mono checkAndCreateIndex(final MongoCollection collection, final Document index) { - return hasIndex(collection, index).flatMap(hasIndex -> { + private Mono checkAndCreateIndex(final MongoCollection collection, final Document index, @Nullable final Timeout timeout) { + return hasIndex(collection, index, timeout).flatMap(hasIndex -> { if (!hasIndex) { - return createIndexMono(collection, index).flatMap(s -> Mono.empty()); + return createIndexMono(collection, index, timeout).flatMap(s -> Mono.empty()); } else { return Mono.empty(); } }); } - private Mono createIndexMono(final MongoCollection collection, final Document index) { - return Mono.from(clientSession == null ? collection.createIndex(index) : collection.createIndex(clientSession, index)); + private Mono createIndexMono(final MongoCollection collection, final Document index, @Nullable final Timeout timeout) { + return collectionWithTimeoutDeferred(collection, timeout).flatMap(wrappedCollection -> + Mono.from(clientSession == null ? wrappedCollection.createIndex(index) : wrappedCollection.createIndex(clientSession, index)) + ); } - private Mono createSaveChunksMono(final AtomicBoolean terminated) { + private Mono createSaveChunksMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) { return Mono.create(sink -> { AtomicLong lengthInBytes = new AtomicLong(0); AtomicInteger chunkIndex = new AtomicInteger(0); new ResizingByteBufferFlux(source, chunkSizeBytes) + .takeUntilOther(createMonoTimer(timeout)) .flatMap((Function>) byteBuffer -> { if (terminated.get()) { return Mono.empty(); @@ -246,36 +252,64 @@ private Mono createSaveChunksMono(final AtomicBoolean terminated) { .append("n", chunkIndex.getAndIncrement()) .append("data", data); - return clientSession == null ? chunksCollection.insertOne(chunkDocument) - : chunksCollection.insertOne(clientSession, chunkDocument); + if (clientSession == null) { + return collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(chunkDocument); + } else { + return collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(clientSession, + chunkDocument); + } + }) .subscribe(null, sink::error, () -> sink.success(lengthInBytes.get())); }); } - private Mono createSaveFileDataMono(final AtomicBoolean terminated, final long lengthInBytes) { + /** + * Creates a Mono that emits a {@link MongoOperationTimeoutException} after the specified timeout. + * + * @param timeout - remaining timeout. + * @return Mono that emits a {@link MongoOperationTimeoutException}. 
+ */ + private static Mono createMonoTimer(final @Nullable Timeout timeout) { + return Timeout.nullAsInfinite(timeout).call(MILLISECONDS, + () -> Mono.never(), + (ms) -> Mono.delay(ofMillis(ms)).then(createTimeoutMonoError()), + () -> createTimeoutMonoError()); + } + + private static Mono createTimeoutMonoError() { + return Mono.error(TimeoutContext.createMongoTimeoutException( + "GridFS waiting for data from the source Publisher exceeded the timeout limit.")); + } + + private Mono createSaveFileDataMono(final AtomicBoolean terminated, + final long lengthInBytes, + @Nullable final Timeout timeout) { + Mono> filesCollectionMono = collectionWithTimeoutDeferred(filesCollection, timeout); if (terminated.compareAndSet(false, true)) { GridFSFile gridFSFile = new GridFSFile(fileId, filename, lengthInBytes, chunkSizeBytes, new Date(), metadata); if (clientSession != null) { - return Mono.from(filesCollection.insertOne(clientSession, gridFSFile)); + return filesCollectionMono.flatMap(collection -> Mono.from(collection.insertOne(clientSession, gridFSFile))); } else { - return Mono.from(filesCollection.insertOne(gridFSFile)); + return filesCollectionMono.flatMap(collection -> Mono.from(collection.insertOne(gridFSFile))); } } else { return Mono.empty(); } } - private Mono createCancellationMono(final AtomicBoolean terminated) { + private Mono createCancellationMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) { + Mono> chunksCollectionMono = collectionWithTimeoutDeferred(chunksCollection, timeout); if (terminated.compareAndSet(false, true)) { if (clientSession != null) { - return Mono.from(chunksCollection.deleteMany(clientSession, new Document("files_id", fileId))); + return chunksCollectionMono.flatMap(collection -> Mono.from(collection + .deleteMany(clientSession, new Document("files_id", fileId)))); } else { - return Mono.from(chunksCollection.deleteMany(new Document("files_id", fileId))); + return chunksCollectionMono.flatMap(collection -> Mono.from(collection + .deleteMany(new Document("files_id", fileId)))); } } else { return Mono.empty(); } } - } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java index b6c3cb73c61..5ae7f4815e5 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java @@ -32,7 +32,10 @@ import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyResult; import com.mongodb.client.result.DeleteResult; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.FindPublisher; import com.mongodb.reactivestreams.client.MongoClient; import com.mongodb.reactivestreams.client.MongoClients; @@ -61,15 +64,22 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static 
com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.internal.BsonUtil.mutableDeepCopy; /** *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      */ public class ClientEncryptionImpl implements ClientEncryption { + private static final String TIMEOUT_ERROR_MESSAGE_CREATE_DATA_KEY = "Creating data key exceeded the timeout limit."; + private static final String TIMEOUT_ERROR_MESSAGE_REWRAP_DATA_KEY = "Rewrapping data key exceeded the timeout limit."; + private static final String TIMEOUT_ERROR_MESSAGE_CREATE_COLLECTION = "Encryption collection creation exceeded the timeout limit."; private final Crypt crypt; private final ClientEncryptionSettings options; private final MongoClient keyVaultClient; @@ -85,10 +95,22 @@ public ClientEncryptionImpl(final MongoClient keyVaultClient, final ClientEncryp this.crypt = Crypts.create(keyVaultClient, options); this.options = options; MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace()); - this.collection = keyVaultClient.getDatabase(namespace.getDatabaseName()) + this.collection = getVaultCollection(keyVaultClient, options, namespace); + } + + private static MongoCollection getVaultCollection(final MongoClient keyVaultClient, + final ClientEncryptionSettings options, + final MongoNamespace namespace) { + MongoCollection vaultCollection = keyVaultClient.getDatabase(namespace.getDatabaseName()) .getCollection(namespace.getCollectionName(), BsonDocument.class) .withWriteConcern(WriteConcern.MAJORITY) .withReadConcern(ReadConcern.MAJORITY); + + Long timeoutMs = options.getTimeout(MILLISECONDS); + if (timeoutMs != null){ + vaultCollection = vaultCollection.withTimeout(timeoutMs, MILLISECONDS); + } + return vaultCollection; } @Override @@ -98,30 +120,47 @@ public Publisher createDataKey(final String kmsProvider) { @Override public Publisher createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) { - return crypt.createDataKey(kmsProvider, dataKeyOptions) + return Mono.defer(() -> { + Timeout operationTimeout = startTimeout(); + return createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + }); + } + + public Mono createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions, @Nullable final Timeout operationTimeout) { + return crypt.createDataKey(kmsProvider, dataKeyOptions, operationTimeout) .flatMap(dataKeyDocument -> { MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace()); - return Mono.from(keyVaultClient.getDatabase(namespace.getDatabaseName()) - .getCollection(namespace.getCollectionName(), BsonDocument.class) - .withWriteConcern(WriteConcern.MAJORITY) - .insertOne(dataKeyDocument)) + + MongoCollection vaultCollection = keyVaultClient + .getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + return Mono.from(collectionWithTimeout(vaultCollection, operationTimeout, TIMEOUT_ERROR_MESSAGE_CREATE_DATA_KEY) + .insertOne(dataKeyDocument)) .map(i -> dataKeyDocument.getBinary("_id")); }); } @Override public Publisher encrypt(final BsonValue value, final EncryptOptions options) { - return crypt.encryptExplicitly(value, options); + notNull("value", value); + notNull("options", options); + + return Mono.defer(() -> crypt.encryptExplicitly(value, options, startTimeout())); } @Override public Publisher encryptExpression(final Bson expression, final EncryptOptions options) { - return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options); + return Mono.defer(() -> crypt.encryptExpression( + expression.toBsonDocument(BsonDocument.class, 
collection.getCodecRegistry()), + options, + startTimeout())); } @Override public Publisher decrypt(final BsonBinary value) { - return crypt.decryptExplicitly(value); + notNull("value", value); + return Mono.defer(() -> crypt.decryptExplicitly(value, startTimeout())); } @Override @@ -180,8 +219,10 @@ public Publisher rewrapManyDataKey(final Bson filter) { @Override public Publisher rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) { - return Mono.fromRunnable(() -> validateRewrapManyDataKeyOptions(options)).then( - crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options) + return Mono.fromRunnable(() -> validateRewrapManyDataKeyOptions(options)) + .then(Mono.defer(()-> { + Timeout operationTimeout = startTimeout(); + return crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options, operationTimeout) .flatMap(results -> { if (results.isEmpty()) { return Mono.fromCallable(RewrapManyDataKeyResult::new); @@ -195,8 +236,10 @@ public Publisher rewrapManyDataKey(final Bson filter, f Updates.currentDate("updateDate")) ); }).collect(Collectors.toList()); - return Mono.from(collection.bulkWrite(updateModels)).map(RewrapManyDataKeyResult::new); - })); + return Mono.from(collectionWithTimeout(collection, operationTimeout, TIMEOUT_ERROR_MESSAGE_REWRAP_DATA_KEY) + .bulkWrite(updateModels)).map(RewrapManyDataKeyResult::new); + }); + })); } @Override @@ -222,6 +265,7 @@ public Publisher createEncryptedCollection(final MongoDatabase dat } String keyIdBsonKey = "keyId"; return Mono.defer(() -> { + Timeout operationTimeout = startTimeout(); // `Mono.defer` results in `maybeUpdatedEncryptedFields` and `dataKeyMightBeCreated` (mutable state) // being created once per `Subscriber`, which allows the produced `Mono` to support multiple `Subscribers`. BsonDocument maybeUpdatedEncryptedFields = mutableDeepCopy(encryptedFields); @@ -233,7 +277,7 @@ public Publisher createEncryptedCollection(final MongoDatabase dat .filter(field -> field.containsKey(keyIdBsonKey)) .filter(field -> Objects.equals(field.get(keyIdBsonKey), BsonNull.VALUE)) // here we rely on the `createDataKey` publisher being cold, i.e., doing nothing until it is subscribed to - .map(field -> Mono.fromDirect(createDataKey(kmsProvider, dataKeyOptions)) + .map(field -> Mono.fromDirect(createDataKey(kmsProvider, dataKeyOptions, operationTimeout)) // This is the closest we can do with reactive streams to setting the `dataKeyMightBeCreated` flag // immediately before calling `createDataKey`. .doOnSubscribe(subscription -> dataKeyMightBeCreated.set(true)) @@ -255,8 +299,10 @@ public Publisher createEncryptedCollection(final MongoDatabase dat // // Similarly, the `Subscriber` of the returned `Publisher` is guaranteed to observe all those write actions // via the `maybeUpdatedEncryptedFields` reference, which is emitted as a result of `thenReturn`. 
- .thenEmpty(Mono.defer(() -> Mono.fromDirect(database.createCollection(collectionName, - new CreateCollectionOptions(createCollectionOptions).encryptedFields(maybeUpdatedEncryptedFields)))) + .thenEmpty(Mono.defer(() -> Mono.fromDirect(databaseWithTimeout(database, + TIMEOUT_ERROR_MESSAGE_CREATE_COLLECTION, operationTimeout) + .createCollection(collectionName, new CreateCollectionOptions(createCollectionOptions) + .encryptedFields(maybeUpdatedEncryptedFields)))) ) .onErrorMap(e -> dataKeyMightBeCreated.get(), e -> new MongoUpdatedEncryptedFieldsException(maybeUpdatedEncryptedFields, @@ -265,7 +311,9 @@ public Publisher createEncryptedCollection(final MongoDatabase dat .thenReturn(maybeUpdatedEncryptedFields); }); } else { - return Mono.fromDirect(database.createCollection(collectionName, createCollectionOptions)) + return databaseWithTimeoutDeferred(database, startTimeout()) + .flatMap(wrappedDatabase -> Mono.fromDirect(wrappedDatabase + .createCollection(collectionName, createCollectionOptions))) .thenReturn(encryptedFields); } } @@ -275,4 +323,9 @@ public void close() { keyVaultClient.close(); crypt.close(); } + + @Nullable + private Timeout startTimeout() { + return TimeoutContext.startTimeout(options.getTimeout(MILLISECONDS)); + } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java index 06d5f713019..37d0236293b 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java @@ -19,6 +19,7 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.MongoUpdatedEncryptedFieldsException; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateEncryptedCollectionParams; import com.mongodb.client.model.vault.DataKeyOptions; @@ -108,7 +109,7 @@ public interface ClientEncryption extends Closeable { * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption * @mongodb.driver.manual reference/operator/aggregation/match/ $match */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) Publisher encryptExpression(Bson expression, EncryptOptions options); /** diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java index 36b09c21add..394ca1745e3 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java @@ -57,6 +57,7 @@ public void shouldPassAllOutcomes() { @After public void cleanUp() { + super.cleanUp(); if (mongoClient != null) { mongoClient.close(); } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java new file mode 100644 index 00000000000..75a19536cb7 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java @@ -0,0 +1,534 @@ +/* + * 
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.AbstractClientSideOperationsTimeoutProseTest; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.reactivestreams.client.gridfs.GridFSBucket; +import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets; +import com.mongodb.reactivestreams.client.syncadapter.SyncGridFSBucket; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Hooks; +import reactor.test.StepVerifier; + +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.applyTimeoutMultiplierForServerless; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isServerlessTest; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.ClusterFixture.sleep; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * See https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#prose-tests + */ +public final class ClientSideOperationTimeoutProseTest extends AbstractClientSideOperationsTimeoutProseTest { + private MongoClient wrapped; + + @Override + protected com.mongodb.client.MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) { + wrapped = createReactiveClient(mongoClientSettings); + 
return new SyncMongoClient(wrapped); + } + + private static MongoClient createReactiveClient(final MongoClientSettings.Builder builder) { + return MongoClients.create(builder.build()); + } + + private static MongoClient createReactiveClient(final MongoClientSettings mongoClientSettings) { + return MongoClients.create(mongoClientSettings); + } + + @Override + protected com.mongodb.client.gridfs.GridFSBucket createGridFsBucket(final com.mongodb.client.MongoDatabase mongoDatabase, + final String bucketName) { + return new SyncGridFSBucket(GridFSBuckets.create(wrapped.getDatabase(mongoDatabase.getName()), bucketName)); + } + + private GridFSBucket createReaciveGridFsBucket(final MongoDatabase mongoDatabase, final String bucketName) { + return GridFSBuckets.create(mongoDatabase, bucketName); + } + + @Override + protected boolean isAsync() { + return true; + } + + @DisplayName("6. GridFS Upload - uploads via openUploadStream can be timed out") + @Test + @Override + public void testGridFSUploadViaOpenUploadStreamTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + //given + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(405)) + + " }" + + "}"); + + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + applyTimeoutMultiplierForServerless(400), TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(gridFsFileNamespace.getDatabaseName()); + GridFSBucket gridFsBucket = createReaciveGridFsBucket(database, GRID_FS_BUCKET_NAME); + + + TestEventPublisher eventPublisher = new TestEventPublisher<>(); + TestSubscriber testSubscriber = new TestSubscriber<>(); + + gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream()) + .subscribe(testSubscriber); + + //when + eventPublisher.sendEvent(ByteBuffer.wrap(new byte[]{0x12})); + testSubscriber.requestMore(1); + /* + By prose spec definition we have to close GridFSUploadStream when we don't have more data to submit and want to flux internal buffers. + However, in Reactive streams that would be equivalent to calling propagating complete signal from the source publisher. + */ + eventPublisher.complete(); + + //then + testSubscriber.assertTerminalEvent(); + + List onErrorEvents = testSubscriber.getOnErrorEvents(); + assertEquals(1, onErrorEvents.size()); + + Throwable commandError = onErrorEvents.get(0); + Throwable operationTimeoutErrorCause = commandError.getCause(); + assertInstanceOf(MongoOperationTimeoutException.class, commandError); + assertInstanceOf(MongoSocketReadTimeoutException.class, operationTimeoutErrorCause); + + CommandFailedEvent chunkInsertFailedEvent = commandListener.getCommandFailedEvent("insert"); + assertNotNull(chunkInsertFailedEvent); + assertEquals(commandError, commandListener.getCommandFailedEvent("insert").getThrowable()); + } + } + + @DisplayName("6. 
GridFS Upload - Aborting an upload stream can be timed out") + @Test + @Override + public void testAbortingGridFsUploadStreamTimeout() throws ExecutionException, InterruptedException, TimeoutException { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + //given + CompletableFuture droppedErrorFuture = new CompletableFuture<>(); + Hooks.onErrorDropped(droppedErrorFuture::complete); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"delete\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(405)) + + " }" + + "}"); + + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + applyTimeoutMultiplierForServerless(400), TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(gridFsFileNamespace.getDatabaseName()); + GridFSBucket gridFsBucket = createReaciveGridFsBucket(database, GRID_FS_BUCKET_NAME); + + + TestEventPublisher eventPublisher = new TestEventPublisher<>(); + TestSubscriber testSubscriber = new TestSubscriber<>(); + + gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream()) + .subscribe(testSubscriber); + + //when + eventPublisher.sendEvent(ByteBuffer.wrap(new byte[]{0x01, 0x02, 0x03, 0x04})); + testSubscriber.requestMore(1); + /* + By prose spec definition we have to abort GridFSUploadStream. + However, in Reactive streams that would be equivalent to calling subscription to propagate cancellation signal. + */ + testSubscriber.cancelSubscription(); + + //then + Throwable droppedError = droppedErrorFuture.get(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS); + Throwable commandError = droppedError.getCause(); + Throwable operationTimeoutErrorCause = commandError.getCause(); + + assertInstanceOf(MongoOperationTimeoutException.class, commandError); + assertInstanceOf(MongoSocketReadTimeoutException.class, operationTimeoutErrorCause); + + CommandFailedEvent deleteFailedEvent = commandListener.getCommandFailedEvent("delete"); + assertNotNull(deleteFailedEvent); + + assertEquals(commandError, commandListener.getCommandFailedEvent("delete").getThrowable()); + // When subscription is cancelled, we should not receive any more events. + testSubscriber.assertNoTerminalEvent(); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("TimeoutMS applies to full resume attempt in a next call") + @Test + public void testTimeoutMSAppliesToFullResumeAttemptInNextCall() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + assumeFalse(isServerlessTest()); + + //given + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 500, TimeUnit.MILLISECONDS))) { + + MongoNamespace namespace = generateNamespace(); + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1}," + + " data: {" + + " failCommands: [\"getMore\" ]," + + " errorCode: 7," + + " errorLabels: [\"ResumableChangeStreamError\" ]" + + " }" + + "}"); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch( + singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}"))); + + Assertions.assertThrows(MongoOperationTimeoutException.class, + () -> Flux.from(documentChangeStreamPublisher).blockFirst(TIMEOUT_DURATION)); + //then + sleep(200); //let publisher invalidate the cursor after the error. + List commandStartedEvents = commandListener.getCommandStartedEvents(); + + List expectedCommandNames = Arrays.asList("aggregate", "getMore", "killCursors", "aggregate", "getMore", "killCursors"); + assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents); + + List commandFailedEvents = commandListener.getCommandFailedEvents(); + assertEquals(2, commandFailedEvents.size()); + + CommandFailedEvent firstGetMoreFailedEvent = commandFailedEvents.get(0); + assertEquals("getMore", firstGetMoreFailedEvent.getCommandName()); + assertInstanceOf(MongoCommandException.class, firstGetMoreFailedEvent.getThrowable()); + + CommandFailedEvent secondGetMoreFailedEvent = commandFailedEvents.get(1); + assertEquals("getMore", secondGetMoreFailedEvent.getCommandName()); + assertInstanceOf(MongoOperationTimeoutException.class, secondGetMoreFailedEvent.getThrowable()); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("TimeoutMS applied to initial aggregate") + @Test + public void testTimeoutMSAppliedToInitialAggregate() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + assumeFalse(isServerlessTest()); + + //given + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 200, TimeUnit.MILLISECONDS))) { + + MongoNamespace namespace = generateNamespace(); + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch( + singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}"))) + .fullDocument(FullDocument.UPDATE_LOOKUP); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1}," + + " data: {" + + " failCommands: [\"aggregate\" ]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 201) + + " }" + + "}"); + + //when + Assertions.assertThrows(MongoOperationTimeoutException.class, + () -> Flux.from(documentChangeStreamPublisher).blockFirst(TIMEOUT_DURATION)); + + //We do not expect cursor to have been created. However, publisher closes cursor asynchronously, thus we give it some time + // to make sure that cursor has not been closed (which would indicate that it was created). + sleep(200); + + //then + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertEquals(1, commandStartedEvents.size()); + assertEquals("aggregate", commandStartedEvents.get(0).getCommandName()); + assertOnlyOneCommandTimeoutFailure("aggregate"); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("TimeoutMS is refreshed for getMore if maxAwaitTimeMS is not set") + @Test + public void testTimeoutMsRefreshedForGetMoreWhenMaxAwaitTimeMsNotSet() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + assumeFalse(isServerlessTest()); + + //given + BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + sleep(2000); + + + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 300, TimeUnit.MILLISECONDS))) { + + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 3}," + + " data: {" + + " failCommands: [\"getMore\", \"aggregate\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 200) + + " }" + + "}"); + + collectionHelper.insertDocuments(WriteConcern.MAJORITY, + BsonDocument.parse("{x: 1}"), + BsonDocument.parse("{x: 2}"), + + BsonDocument.parse("{x: 3}"), + BsonDocument.parse("{x: 4}"), + + BsonDocument.parse("{x: 5}"), + BsonDocument.parse("{x: 6}")); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch() + .startAtOperationTime(startTime); + StepVerifier.create(documentChangeStreamPublisher, 2) + //then + .expectNextCount(2) + .thenAwait(Duration.ofMillis(300)) + .thenRequest(2) + .expectNextCount(2) + .thenAwait(Duration.ofMillis(300)) + .thenRequest(2) + .expectNextCount(2) + .thenAwait(Duration.ofMillis(300)) + .thenRequest(2) + .expectError(MongoOperationTimeoutException.class) + .verify(); + + sleep(500); //let publisher invalidate the cursor after the error. + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + List expectedCommandNames = Arrays.asList("aggregate", "getMore", "getMore", "getMore", "killCursors"); + assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents); + assertOnlyOneCommandTimeoutFailure("getMore"); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("TimeoutMS is refreshed for getMore if maxAwaitTimeMS is set") + @Test + public void testTimeoutMsRefreshedForGetMoreWhenMaxAwaitTimeMsSet() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + assumeFalse(isServerlessTest()); + + //given + BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + sleep(2000); + + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 300, TimeUnit.MILLISECONDS))) { + + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()) + .withReadPreference(ReadPreference.primary()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 2}," + + " data: {" + + " failCommands: [\"aggregate\", \"getMore\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 200) + + " }" + + "}"); + + + collectionHelper.insertDocuments(WriteConcern.MAJORITY, + BsonDocument.parse("{x: 1}"), + BsonDocument.parse("{x: 2}"), + + BsonDocument.parse("{x: 3}"), + BsonDocument.parse("{x: 4}")); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch() + .maxAwaitTime(1, TimeUnit.MILLISECONDS) + .startAtOperationTime(startTime); + StepVerifier.create(documentChangeStreamPublisher, 2) + //then + .expectNextCount(2) + .thenAwait(Duration.ofMillis(600)) + .thenRequest(2) + .expectNextCount(2) + .thenCancel() + .verify(); + + sleep(500); //let publisher invalidate the cursor after the error. + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + List expectedCommandNames = Arrays.asList("aggregate", "getMore", "killCursors"); + assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. + */ + @DisplayName("TimeoutMS is honored for next operation when several getMore executed internally") + @Test + public void testTimeoutMsISHonoredForNnextOperationWhenSeveralGetMoreExecutedInternally() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + assumeFalse(isServerlessTest()); + + //given + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 2500, TimeUnit.MILLISECONDS))) { + + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch(); + StepVerifier.create(documentChangeStreamPublisher, 2) + //then + .expectError(MongoOperationTimeoutException.class) + .verify(); + + sleep(200); //let publisher invalidate the cursor after the error. + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertCommandStartedEventsInOder(Arrays.asList("aggregate", "getMore", "getMore", "getMore", "killCursors"), + commandStartedEvents); + assertOnlyOneCommandTimeoutFailure("getMore"); + } + } + + private static void assertCommandStartedEventsInOder(final List expectedCommandNames, + final List commandStartedEvents) { + assertEquals(expectedCommandNames.size(), commandStartedEvents.size(), "Expected: " + expectedCommandNames + ". 
Actual: " + + commandStartedEvents.stream() + .map(CommandStartedEvent::getCommand) + .map(BsonDocument::toJson) + .collect(Collectors.toList())); + + for (int i = 0; i < expectedCommandNames.size(); i++) { + CommandStartedEvent commandStartedEvent = commandStartedEvents.get(i); + + assertEquals(expectedCommandNames.get(i), commandStartedEvent.getCommandName()); + } + } + + private void assertOnlyOneCommandTimeoutFailure(final String command) { + List commandFailedEvents = commandListener.getCommandFailedEvents(); + assertEquals(1, commandFailedEvents.size()); + + CommandFailedEvent failedAggregateCommandEvent = commandFailedEvents.get(0); + assertEquals(command, commandFailedEvents.get(0).getCommandName()); + assertInstanceOf(MongoOperationTimeoutException.class, failedAggregateCommandEvent.getThrowable()); + } + + @Override + @BeforeEach + public void setUp() { + super.setUp(); + SyncMongoClient.enableSleepAfterSessionClose(postSessionCloseSleep()); + } + + @Override + @AfterEach + public void tearDown() { + super.tearDown(); + SyncMongoClient.disableSleep(); + } + + @Override + protected int postSessionCloseSleep() { + return 256; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java index e3ff5921ad2..2040e295d9a 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java @@ -17,7 +17,6 @@ package com.mongodb.reactivestreams.client; import com.mongodb.ReadConcern; -import com.mongodb.event.CommandEvent; import com.mongodb.event.CommandStartedEvent; import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonDocument; @@ -62,7 +61,7 @@ public void shouldIncludeReadConcernInCommand() throws InterruptedException { .find()) .block(TIMEOUT_DURATION); - List events = commandListener.getCommandStartedEvents(); + List events = commandListener.getCommandStartedEvents(); BsonDocument commandDocument = new BsonDocument("find", new BsonString("test")) .append("readConcern", ReadConcern.LOCAL.asDocument()) diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java new file mode 100644 index 00000000000..b8a40529dcd --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Sinks; + +public class TestEventPublisher { + private final Sinks.Many sink; + + public TestEventPublisher() { + this.sink = Sinks.many().unicast().onBackpressureBuffer(); + } + + // Method to send events + public void sendEvent(final T event) { + sink.tryEmitNext(event); + } + + public Flux getEventStream() { + return sink.asFlux(); + } + + public long currentSubscriberCount() { + return sink.currentSubscriberCount(); + } + + public void complete() { + sink.tryEmitComplete(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java index f6269c737ec..05411729ba7 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java @@ -135,6 +135,10 @@ public List getOnNextEvents() { return onNextEvents; } + public void cancelSubscription() { + subscription.cancel(); + } + /** * Assert that a particular sequence of items was received by this {@link Subscriber} in order. * diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java new file mode 100644 index 00000000000..5df9c571dbe --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
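A hedged usage sketch for the TestEventPublisher added above. The flattened patch text drops the generic parameter, so the <Document> type argument below is assumed purely for illustration.

import org.bson.Document;

import com.mongodb.reactivestreams.client.TestEventPublisher;

import reactor.core.publisher.Flux;

final class TestEventPublisherUsageSketch {
    public static void main(final String[] args) {
        TestEventPublisher<Document> publisher = new TestEventPublisher<>();   // assumes TestEventPublisher<T>
        Flux<Document> events = publisher.getEventStream();
        events.subscribe(doc -> System.out.println("received: " + doc.toJson()));
        publisher.sendEvent(new Document("operationType", "insert"));          // delivered to the single subscriber
        System.out.println("subscribers: " + publisher.currentSubscriberCount());
        publisher.complete();
    }
}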
+ */ + +package com.mongodb.reactivestreams.client.csot; + + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.csot.AbstractClientSideOperationsEncryptionTimeoutProseTest; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideOperationsEncryptionTimeoutProseTest extends AbstractClientSideOperationsEncryptionTimeoutProseTest { + public ClientEncryption createClientEncryption(final ClientEncryptionSettings.Builder builder) { + return new SyncClientEncryption(ClientEncryptions.create(builder.build())); + } + + @Override + protected MongoClient createMongoClient(final MongoClientSettings.Builder builder) { + return new SyncMongoClient(com.mongodb.reactivestreams.client.MongoClients.create(builder.build())); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java index 410dfd02fc4..ebbd2069f70 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java @@ -373,7 +373,7 @@ public void testBatchCursorReportsCursorErrors() { BsonDocument getMoreCommand = commandListener.getCommandStartedEvents().stream() .filter(e -> e.getCommandName().equals("getMore")) - .map(e -> ((CommandStartedEvent) e).getCommand()) + .map(CommandStartedEvent::getCommand) .findFirst() .get(); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java index 8e7b1af1bc9..102b96e424f 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java @@ -18,8 +18,10 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import org.bson.Document; import org.junit.jupiter.api.Test; @@ -36,6 +38,7 @@ import java.util.Queue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.IntStream; import static com.mongodb.reactivestreams.client.internal.TestHelper.OPERATION_EXECUTOR; @@ -169,6 +172,11 @@ BatchCursorPublisher createVerifiableBatchCursor(final List AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { return readOperation; } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (AsyncOperations::getTimeoutSettings); + } }; OperationExecutor executor = OPERATION_EXECUTOR; diff --git 
a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java index 21c0921225a..6b81b1f42af 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java @@ -17,6 +17,7 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.client.AggregateIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.AggregatePublisher; @@ -111,6 +112,12 @@ public AggregateIterable let(final Bson variables) { return this; } + @Override + public AggregateIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + @Override public Document explain() { return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java index 36aff9506ed..494e5f8c74e 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java @@ -21,6 +21,7 @@ import com.mongodb.TransactionOptions; import com.mongodb.client.ClientSession; import com.mongodb.client.TransactionBody; +import com.mongodb.internal.TimeoutContext; import com.mongodb.lang.Nullable; import com.mongodb.session.ServerSession; import org.bson.BsonDocument; @@ -182,6 +183,11 @@ public T withTransaction(final TransactionBody transactionBody, final Tra throw new UnsupportedOperationException(); } + @Override + public TimeoutContext getTimeoutContext() { + return wrapped.getTimeoutContext(); + } + private static void sleep(final long millis) { try { Thread.sleep(millis); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java index 1f4594270f9..7f50727621d 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java @@ -17,6 +17,7 @@ package com.mongodb.reactivestreams.client.syncadapter; import com.mongodb.client.DistinctIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.DistinctPublisher; @@ -69,4 +70,10 @@ public DistinctIterable comment(@Nullable final BsonValue comment) { wrapped.comment(comment); return this; } + + @Override + public DistinctIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java 
b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java index 0cc68b0042e..3cf93b9ffb0 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java @@ -19,6 +19,7 @@ import com.mongodb.CursorType; import com.mongodb.ExplainVerbosity; import com.mongodb.client.FindIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.FindPublisher; @@ -174,6 +175,12 @@ public FindIterable allowDiskUse(@Nullable final java.lang.Boolean allowDiskU return this; } + @Override + public FindIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + @Override public Document explain() { return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java index a09b4ffbec3..48b28e5540a 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java @@ -42,6 +42,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; @@ -79,6 +80,11 @@ public ReadConcern getReadConcern() { return wrapped.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + @Override public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) { return new SyncGridFSBucket(wrapped.withChunkSizeBytes(chunkSizeBytes)); @@ -99,6 +105,11 @@ public GridFSBucket withReadConcern(final ReadConcern readConcern) { return new SyncGridFSBucket(wrapped.withReadConcern(readConcern)); } + @Override + public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncGridFSBucket(wrapped.withTimeout(timeout, timeUnit)); + } + @Override public GridFSUploadStream openUploadStream(final String filename) { return openUploadStream(filename, new GridFSUploadOptions()); @@ -197,7 +208,7 @@ public GridFSDownloadStream openDownloadStream(final ObjectId id) { @Override public GridFSDownloadStream openDownloadStream(final BsonValue id) { - throw new UnsupportedOperationException(); + return new SyncGridFSDownloadStream(wrapped.downloadToPublisher(id)); } @Override @@ -279,17 +290,17 @@ public GridFSFindIterable find() { @Override public GridFSFindIterable find(final Bson filter) { - throw new UnsupportedOperationException(); + return new SyncGridFSFindIterable(wrapped.find(filter)); } @Override public GridFSFindIterable find(final ClientSession clientSession) { - throw new UnsupportedOperationException(); + return new SyncGridFSFindIterable(wrapped.find(unwrap(clientSession))); } @Override public GridFSFindIterable find(final ClientSession clientSession, final Bson filter) { - throw new 
UnsupportedOperationException(); + return new SyncGridFSFindIterable(wrapped.find(unwrap(clientSession), filter)); } @Override @@ -334,12 +345,16 @@ public void rename(final ClientSession clientSession, final BsonValue id, final @Override public void drop() { - Mono.from(wrapped.drop()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + Mono.from(wrapped.drop()) + .contextWrite(CONTEXT) + .block(TIMEOUT_DURATION); } @Override public void drop(final ClientSession clientSession) { - Mono.from(wrapped.drop(unwrap(clientSession))).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + Mono.from(wrapped.drop(unwrap(clientSession))) + .contextWrite(CONTEXT) + .block(TIMEOUT_DURATION); } private void toOutputStream(final GridFSDownloadPublisher downloadPublisher, final OutputStream destination) { diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java new file mode 100644 index 00000000000..b3217b8f47d --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
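The SyncGridFSDownloadStream added below merges the publisher's ByteBuffers by reducing them into the first buffer. As a standalone illustration only (not the patch's implementation), one way to concatenate a list of buffers with explicit sizing:

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;

final class ByteBufferConcatSketch {
    // Copies every buffer's remaining bytes into a buffer sized up front, then flips it for reading.
    static ByteBuffer concat(final List<ByteBuffer> buffers) {
        int total = buffers.stream().mapToInt(ByteBuffer::remaining).sum();
        ByteBuffer combined = ByteBuffer.allocate(total);
        buffers.forEach(combined::put);
        combined.flip();
        return combined;
    }

    public static void main(final String[] args) {
        ByteBuffer first = ByteBuffer.wrap(new byte[] {1, 2});
        ByteBuffer second = ByteBuffer.wrap(new byte[] {3, 4, 5});
        System.out.println(concat(Arrays.asList(first, second)).remaining());   // prints 5
    }
}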
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.MongoGridFSException; +import com.mongodb.client.gridfs.GridFSDownloadStream; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher; +import reactor.core.publisher.Flux; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +public class SyncGridFSDownloadStream extends GridFSDownloadStream { + private final AtomicBoolean closed = new AtomicBoolean(false); + private ByteBuffer byteBuffer; + private final GridFSDownloadPublisher wrapped; + + public SyncGridFSDownloadStream(final GridFSDownloadPublisher publisher) { + this.wrapped = publisher; + this.byteBuffer = ByteBuffer.allocate(0); + } + + @Override + public GridFSFile getGridFSFile() { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSDownloadStream batchSize(final int batchSize) { + throw new UnsupportedOperationException(); + } + + @Override + public int read() { + checkClosed(); + readAll(); + + return byteBuffer.get(); + } + + @Override + public int read(final byte[] b) { + checkClosed(); + readAll(); + int remaining = byteBuffer.remaining(); + byteBuffer.get(b); + return remaining - byteBuffer.remaining(); + } + + @Override + public int read(final byte[] b, final int off, final int len) { + checkClosed(); + readAll(); + int remaining = byteBuffer.remaining(); + byteBuffer.get(b, off, len); + return remaining - byteBuffer.remaining(); + } + + @Override + public long skip(final long n) { + checkClosed(); + readAll(); + int position = byteBuffer.position(); + long min = Math.min(position, n); + byteBuffer.position((int) min); + return min; + } + + @Override + public int available() { + checkClosed(); + readAll(); + return byteBuffer.remaining(); + } + + @Override + public void mark() { + checkClosed(); + readAll(); + byteBuffer.mark(); + } + + @Override + public void reset() { + checkClosed(); + readAll(); + byteBuffer.reset(); + } + + @Override + public void close() { + closed.set(true); + } + + private void readAll() { + List byteBuffers = requireNonNull(Flux + .from(wrapped).contextWrite(CONTEXT).collectList().block((TIMEOUT_DURATION))); + + byteBuffer = byteBuffers.stream().reduce((byteBuffer1, byteBuffer2) -> { + byteBuffer1.put(byteBuffer2); + return byteBuffer1; + }).orElseThrow(() -> new IllegalStateException("No data found")); + } + + private void checkClosed() { + if (closed.get()) { + throw new MongoGridFSException("The DownloadStream has been closed"); + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java new file mode 100644 index 00000000000..1021e6bc102 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
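The readAll() method above drains the GridFSDownloadPublisher by collecting every emitted buffer and then blocking with a bounded wait. A minimal standalone sketch of that collect-then-block pattern on an arbitrary Publisher; the 30-second timeout is an illustrative value, not the driver's.

import java.time.Duration;
import java.util.List;

import org.reactivestreams.Publisher;

import reactor.core.publisher.Flux;

final class CollectThenBlockSketch {
    // Collects all elements of the publisher and blocks the calling thread until completion or timeout.
    static <T> List<T> drain(final Publisher<T> publisher) {
        return Flux.from(publisher).collectList().block(Duration.ofSeconds(30));
    }

    public static void main(final String[] args) {
        System.out.println(drain(Flux.just(1, 2, 3)));   // prints [1, 2, 3]
    }
}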
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.gridfs.GridFSFindIterable; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +class SyncGridFSFindIterable extends SyncMongoIterable implements GridFSFindIterable { + private final GridFSFindPublisher wrapped; + + SyncGridFSFindIterable(final GridFSFindPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public GridFSFindIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public GridFSFindIterable limit(final int limit) { + wrapped.limit(limit); + return this; + } + + @Override + public GridFSFindIterable skip(final int skip) { + wrapped.skip(skip); + return this; + } + + @Override + public GridFSFindIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public GridFSFindIterable sort(@Nullable final Bson sort) { + wrapped.sort(sort); + return this; + } + + @Override + public GridFSFindIterable noCursorTimeout(final boolean noCursorTimeout) { + wrapped.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public GridFSFindIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public GridFSFindIterable collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java index 5dfa3fe76d6..48d88963077 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java @@ -17,6 +17,7 @@ package com.mongodb.reactivestreams.client.syncadapter; import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ListCollectionsPublisher; import org.bson.BsonValue; @@ -62,4 +63,10 @@ public ListCollectionsIterable comment(final BsonValue comment) { wrapped.comment(comment); return this; } + + @Override + public ListCollectionsIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java index 
53f901e538b..4248e59c361 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java @@ -17,6 +17,7 @@ package com.mongodb.reactivestreams.client.syncadapter; import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ListDatabasesPublisher; import org.bson.BsonValue; @@ -74,4 +75,10 @@ public ListDatabasesIterable comment(final BsonValue comment) { wrapped.comment(comment); return this; } + + @Override + public ListDatabasesIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java index 3cec57e3ce0..947cb8f0d0f 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java @@ -17,6 +17,7 @@ package com.mongodb.reactivestreams.client.syncadapter; import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.reactivestreams.client.ListIndexesPublisher; import org.bson.BsonValue; @@ -54,4 +55,10 @@ public ListIndexesIterable comment(final BsonValue comment) { wrapped.comment(comment); return this; } + + @Override + public ListIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java index 7efbde8d9fa..f119c645916 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java @@ -18,6 +18,7 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher; import org.bson.BsonValue; @@ -80,6 +81,12 @@ public ListSearchIndexesIterable comment(final BsonValue comment) { return this; } + @Override + public ListSearchIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + @Override public Document explain() { return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java index 66a287cfa64..efc70b690fa 100644 --- 
a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java @@ -16,6 +16,7 @@ package com.mongodb.reactivestreams.client.syncadapter; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -106,6 +107,7 @@ public com.mongodb.client.MapReduceIterable databaseName(@Nullable final Stri return this; } + @Override public com.mongodb.client.MapReduceIterable batchSize(final int batchSize) { wrapped.batchSize(batchSize); @@ -124,4 +126,10 @@ public com.mongodb.client.MapReduceIterable collation(@Nullable final Collati wrapped.collation(collation); return this; } + + @Override + public com.mongodb.client.MapReduceIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java index 28d5adbdfc7..ceb5ea72769 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java @@ -17,31 +17,27 @@ package com.mongodb.reactivestreams.client.syncadapter; import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; import com.mongodb.client.ChangeStreamIterable; import com.mongodb.client.ClientSession; import com.mongodb.client.ListDatabasesIterable; import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; import com.mongodb.connection.ClusterDescription; import com.mongodb.reactivestreams.client.internal.BatchCursor; -import org.bson.BsonDocument; import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; -import reactor.core.publisher.Mono; import java.util.List; - -import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; -import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; -import static java.util.Objects.requireNonNull; +import java.util.concurrent.TimeUnit; public class SyncMongoClient implements MongoClient { - private static long sleepAfterCursorOpenMS; - - private static long sleepAfterCursorCloseMS; - private static long sleepAfterSessionCloseMS; private static boolean waitForBatchCursorCreation; /** @@ -50,13 +46,17 @@ public class SyncMongoClient implements MongoClient { * can set this to a positive value. A value of 256 ms has been shown to work well. The default value is 0. */ public static void enableSleepAfterCursorOpen(final long sleepMS) { - if (sleepAfterCursorOpenMS != 0) { - throw new IllegalStateException("Already enabled"); - } - if (sleepMS <= 0) { - throw new IllegalArgumentException("sleepMS must be a positive value"); - } - sleepAfterCursorOpenMS = sleepMS; + SyncMongoCluster.enableSleepAfterCursorOpen(sleepMS); + } + + /** + * Unfortunately this is the only way to wait for error logic to complete, since it's asynchronous. 
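For context, a sketch of how a test might wrap its body in the sleep toggles described here; the 256 ms value follows the guidance in the javadoc, and the helper itself is illustrative rather than part of the patch.

import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;

final class SleepToggleUsageSketch {
    static void runWithCursorOpenSleep(final Runnable testBody) {
        SyncMongoClient.enableSleepAfterCursorOpen(256);   // give asynchronous cursor initiation time to finish
        try {
            testBody.run();
        } finally {
            SyncMongoClient.disableSleep();                // reset the shared static state for the next test
        }
    }
}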
+ * This is inherently racy but there are not any other good options. Tests which require cursor error handling to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorError(final long sleepMS) { + SyncMongoCluster.enableSleepAfterCursorError(sleepMS); } /** @@ -66,13 +66,7 @@ public static void enableSleepAfterCursorOpen(final long sleepMS) { * value is 0. */ public static void enableSleepAfterCursorClose(final long sleepMS) { - if (sleepAfterCursorCloseMS != 0) { - throw new IllegalStateException("Already enabled"); - } - if (sleepMS <= 0) { - throw new IllegalArgumentException("sleepMS must be a positive value"); - } - sleepAfterCursorCloseMS = sleepMS; + SyncMongoCluster.enableSleepAfterCursorClose(sleepMS); } /** @@ -81,13 +75,7 @@ public static void enableSleepAfterCursorClose(final long sleepMS) { * the attempt is racy and incorrect, but good enough for tests given that no other approach is available. */ public static void enableSleepAfterSessionClose(final long sleepMS) { - if (sleepAfterSessionCloseMS != 0) { - throw new IllegalStateException("Already enabled"); - } - if (sleepMS <= 0) { - throw new IllegalArgumentException("sleepMS must be a positive value"); - } - sleepAfterSessionCloseMS = sleepMS; + SyncMongoCluster.enableSleepAfterSessionClose(sleepMS); } /** @@ -112,27 +100,31 @@ public static void disableWaitForBatchCursorCreation() { } public static void disableSleep() { - sleepAfterCursorOpenMS = 0; - sleepAfterCursorCloseMS = 0; - sleepAfterSessionCloseMS = 0; + SyncMongoCluster.disableSleep(); } public static long getSleepAfterCursorOpen() { - return sleepAfterCursorOpenMS; + return SyncMongoCluster.getSleepAfterCursorOpen(); + } + + public static long getSleepAfterCursorError() { + return SyncMongoCluster.getSleepAfterCursorError(); } public static long getSleepAfterCursorClose() { - return sleepAfterCursorCloseMS; + return SyncMongoCluster.getSleepAfterCursorClose(); } public static long getSleepAfterSessionClose() { - return sleepAfterSessionCloseMS; + return SyncMongoCluster.getSleepAfterSessionClose(); } private final com.mongodb.reactivestreams.client.MongoClient wrapped; + private final SyncMongoCluster delegate; public SyncMongoClient(final com.mongodb.reactivestreams.client.MongoClient wrapped) { this.wrapped = wrapped; + this.delegate = new SyncMongoCluster(wrapped); } public com.mongodb.reactivestreams.client.MongoClient getWrapped() { @@ -140,102 +132,151 @@ public com.mongodb.reactivestreams.client.MongoClient getWrapped() { } @Override - public MongoDatabase getDatabase(final String databaseName) { - return new SyncMongoDatabase(wrapped.getDatabase(databaseName)); + public CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); } @Override - public ClientSession startSession() { - return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this); + public ReadPreference getReadPreference() { + return delegate.getReadPreference(); } @Override - public ClientSession startSession(final ClientSessionOptions options) { - return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession(options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this); + public WriteConcern getWriteConcern() { + return delegate.getWriteConcern(); } @Override - public void close() { - wrapped.close(); + public ReadConcern getReadConcern() 
{ + return delegate.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return delegate.getTimeout(timeUnit); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return delegate.withCodecRegistry(codecRegistry); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return delegate.withReadPreference(readPreference); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return delegate.withWriteConcern(writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return delegate.withReadConcern(readConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return delegate.withTimeout(timeout, timeUnit); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return delegate.getDatabase(databaseName); + } + + @Override + public ClientSession startSession() { + return delegate.startSession(); + } + + @Override + public ClientSession startSession(final ClientSessionOptions options) { + return delegate.startSession(options); } @Override public MongoIterable listDatabaseNames() { - return listDatabases(BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + return delegate.listDatabaseNames(); } @Override public MongoIterable listDatabaseNames(final ClientSession clientSession) { - return listDatabases(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + return delegate.listDatabaseNames(clientSession); } + @Override public ListDatabasesIterable listDatabases() { - return new SyncListDatabasesIterable<>(wrapped.listDatabases()); + return delegate.listDatabases(); } @Override public ListDatabasesIterable listDatabases(final ClientSession clientSession) { - return listDatabases(clientSession, Document.class); + return delegate.listDatabases(clientSession); } @Override public ListDatabasesIterable listDatabases(final Class resultClass) { - return new SyncListDatabasesIterable<>(wrapped.listDatabases(resultClass)); + return delegate.listDatabases(resultClass); } @Override public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class resultClass) { - return new SyncListDatabasesIterable<>(wrapped.listDatabases(unwrap(clientSession), resultClass)); + return delegate.listDatabases(clientSession, resultClass); } @Override public ChangeStreamIterable watch() { - return new SyncChangeStreamIterable<>(wrapped.watch()); + return delegate.watch(); } @Override public ChangeStreamIterable watch(final Class resultClass) { - return new SyncChangeStreamIterable<>(wrapped.watch(resultClass)); + return delegate.watch(resultClass); } @Override public ChangeStreamIterable watch(final List pipeline) { - return new SyncChangeStreamIterable<>(wrapped.watch(pipeline)); + return delegate.watch(pipeline); } @Override public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { - return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass)); + return delegate.watch(pipeline, resultClass); } @Override public ChangeStreamIterable watch(final ClientSession clientSession) { - return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession))); + return delegate.watch(clientSession); } @Override public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { 
- return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass)); + return delegate.watch(clientSession, resultClass); } @Override public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { - return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline)); + return delegate.watch(clientSession, pipeline); } @Override - public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, - final Class resultClass) { - return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass)); + public ChangeStreamIterable watch( + final ClientSession clientSession, final List pipeline, final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); } + @Override + public void close() { + wrapped.close(); + } + + @Override public ClusterDescription getClusterDescription() { return wrapped.getClusterDescription(); } - private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) { - return ((SyncClientSession) clientSession).getWrapped(); - } } diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java new file mode 100644 index 00000000000..780f7260eb4 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java @@ -0,0 +1,284 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +public class SyncMongoCluster implements MongoCluster { + + private static long sleepAfterCursorOpenMS; + private static long sleepAfterCursorErrorMS; + private static long sleepAfterCursorCloseMS; + private static long sleepAfterSessionCloseMS; + + /** + * Unfortunately this is the only way to wait for a query to be initiated, since Reactive Streams is asynchronous + * and we have no way of knowing. 
Tests which require cursor initiation to complete before execution of the next operation + * can set this to a positive value. A value of 256 ms has been shown to work well. The default value is 0. + */ + public static void enableSleepAfterCursorOpen(final long sleepMS) { + if (sleepAfterCursorOpenMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterCursorOpenMS = sleepMS; + } + + /** + * Unfortunately this is the only way to wait for error logic to complete, since it's asynchronous. + * This is inherently racy but there are not any other good options. Tests which require cursor error handling to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorError(final long sleepMS) { + if (sleepAfterCursorErrorMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterCursorErrorMS = sleepMS; + } + + /** + * Unfortunately this is the only way to wait for close to complete, since it's asynchronous. + * This is inherently racy but there are not any other good options. Tests which require cursor cancellation to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorClose(final long sleepMS) { + if (sleepAfterCursorCloseMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterCursorCloseMS = sleepMS; + } + + + /** + * Enables {@linkplain Thread#sleep(long) sleeping} in {@link SyncClientSession#close()} to wait until asynchronous closing actions + * are done. It is an attempt to make asynchronous {@link SyncMongoClient#close()} method synchronous; + * the attempt is racy and incorrect, but good enough for tests given that no other approach is available. 
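Each enable* method above guards a shared static value: enabling twice without an intervening disable is treated as a programming error, and non-positive sleeps are rejected. A condensed standalone sketch of that guard, mirroring the checks used in this class:

final class OneShotToggleSketch {
    private static long sleepMS;

    static void enable(final long value) {
        if (sleepMS != 0) {
            throw new IllegalStateException("Already enabled");                    // same message as the adapter
        }
        if (value <= 0) {
            throw new IllegalArgumentException("sleepMS must be a positive value");
        }
        sleepMS = value;
    }

    static void disable() {
        sleepMS = 0;   // disableSleep() resets every toggle in the same way
    }

    static long current() {
        return sleepMS;
    }
}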
+ */ + public static void enableSleepAfterSessionClose(final long sleepMS) { + if (sleepAfterSessionCloseMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterSessionCloseMS = sleepMS; + } + + public static void disableSleep() { + sleepAfterCursorOpenMS = 0; + sleepAfterCursorErrorMS = 0; + sleepAfterCursorCloseMS = 0; + sleepAfterSessionCloseMS = 0; + } + + public static long getSleepAfterCursorOpen() { + return sleepAfterCursorOpenMS; + } + + public static long getSleepAfterCursorError() { + return sleepAfterCursorErrorMS; + } + + public static long getSleepAfterCursorClose() { + return sleepAfterCursorCloseMS; + } + + public static long getSleepAfterSessionClose() { + return sleepAfterSessionCloseMS; + } + + private final com.mongodb.reactivestreams.client.MongoCluster wrapped; + + public SyncMongoCluster(final com.mongodb.reactivestreams.client.MongoCluster wrapped) { + this.wrapped = wrapped; + } + + public com.mongodb.reactivestreams.client.MongoCluster getWrapped() { + return wrapped; + } + + @Override + public CodecRegistry getCodecRegistry() { + return wrapped.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return wrapped.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return new SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return new SyncMongoCluster(wrapped.withReadPreference(readPreference)); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return new SyncMongoCluster(wrapped.withWriteConcern(writeConcern)); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return new SyncMongoCluster(wrapped.withReadConcern(readConcern)); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit)); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return new SyncMongoDatabase(wrapped.getDatabase(databaseName)); + } + + @Override + public ClientSession startSession() { + return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this); + } + + @Override + public ClientSession startSession(final ClientSessionOptions options) { + return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession(options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this); + } + + @Override + public MongoIterable listDatabaseNames() { + return listDatabases(BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + } + + @Override + public MongoIterable listDatabaseNames(final ClientSession clientSession) { + return listDatabases(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + } + + @Override + public ListDatabasesIterable listDatabases() { + return new 
SyncListDatabasesIterable<>(wrapped.listDatabases()); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession) { + return listDatabases(clientSession, Document.class); + } + + @Override + public ListDatabasesIterable listDatabases(final Class resultClass) { + return new SyncListDatabasesIterable<>(wrapped.listDatabases(resultClass)); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class resultClass) { + return new SyncListDatabasesIterable<>(wrapped.listDatabases(unwrap(clientSession), resultClass)); + } + + @Override + public ChangeStreamIterable watch() { + return new SyncChangeStreamIterable<>(wrapped.watch()); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(resultClass)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession))); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass)); + } + + private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) { + return ((SyncClientSession) clientSession).getWrapped(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java index 64d94984b2e..922e07cc2d5 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java @@ -59,6 +59,7 @@ import reactor.core.publisher.Mono; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; @@ -102,6 +103,11 @@ public ReadConcern getReadConcern() { return wrapped.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + @Override public MongoCollection withDocumentClass(final Class clazz) { return new SyncMongoCollection<>(wrapped.withDocumentClass(clazz)); @@ -127,6 +133,11 @@ public MongoCollection withReadConcern(final ReadConcern readConcern) { return new SyncMongoCollection<>(wrapped.withReadConcern(readConcern)); } + @Override + public MongoCollection withTimeout(final long timeout, 
final TimeUnit timeUnit) { + return new SyncMongoCollection<>(wrapped.withTimeout(timeout, timeUnit)); + } + @Override public long countDocuments() { return requireNonNull(Mono.from(wrapped.countDocuments()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java index 63485fba132..4e0159f90d0 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java @@ -44,6 +44,7 @@ import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorClose; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorError; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorOpen; import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.isWaitForBatchCursorCreationEnabled; @@ -91,6 +92,7 @@ public void onNext(final T t) { @Override public void onError(final Throwable t) { results.addLast(t); + sleep(getSleepAfterCursorError()); } @Override @@ -155,6 +157,7 @@ public boolean hasNext() { throw new MongoTimeoutException("Time out waiting for result from cursor"); } else if (next instanceof Throwable) { error = translateError((Throwable) next); + sleep(getSleepAfterCursorError()); throw error; } else if (next == COMPLETED) { completed = true; diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java index f1e6d125842..40b15632366 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java @@ -34,6 +34,7 @@ import reactor.core.publisher.Mono; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; @@ -75,6 +76,11 @@ public ReadConcern getReadConcern() { return wrapped.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + @Override public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { return new SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)); @@ -95,6 +101,11 @@ public MongoDatabase withReadConcern(final ReadConcern readConcern) { return new SyncMongoDatabase(wrapped.withReadConcern(readConcern)); } + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)); + } + @Override public MongoCollection getCollection(final String collectionName) { return new SyncMongoCollection<>(wrapped.getCollection(collectionName)); diff --git 
a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java new file mode 100644 index 00000000000..b109931bedf --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java @@ -0,0 +1,160 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.unified; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.connection.TransportSettings; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import reactor.core.publisher.Hooks; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.client.ClientSideOperationTimeoutTest.skipOperationTimeoutTests; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableSleep; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorError; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assumptions.assumeFalse; + + +// See https://github.com/mongodb/specifications/tree/master/source/client-side-operation-timeout/tests +public class ClientSideOperationTimeoutTest extends UnifiedReactiveStreamsTest { + + private final AtomicReference atomicReferenceThrowable = new AtomicReference<>(); + + private static Collection data() throws URISyntaxException, IOException { + return getTestData("unified-test-format/client-side-operation-timeout"); + } + + @Override + protected void skips(final String fileDescription, final String testDescription) { + skipOperationTimeoutTests(fileDescription, testDescription); + + assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set"), + "No iterateOnce support. There is an alternative prose test for it."); + assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if maxAwaitTimeMS is set"), + "No iterateOnce support.
There is an alternative prose test for it."); + /* + The Reactive Streams specification prevents us from allowing a subsequent next call (an onNext event in reactive terms) after a timeout error, + which conflicts with the CSOT spec requirement not to invalidate the change stream but to try resuming and establishing a new change + stream on the server. We immediately let users know about a timeout error, which then closes the stream/publisher. + */ + assumeFalse(testDescription.equals("change stream can be iterated again if previous iteration times out"), + "It is not possible due to a conflict with the Reactive Streams specification."); + assumeFalse(testDescription.equals("timeoutMS applies to full resume attempt in a next call"), + "Flaky and racy due to asynchronous behaviour. There is an alternative prose test for it."); + assumeFalse(testDescription.equals("timeoutMS applied to initial aggregate"), + "No way to catch an error on BatchCursor creation. There is an alternative prose test for it."); + + assumeFalse(testDescription.endsWith("createChangeStream on client")); + assumeFalse(testDescription.endsWith("createChangeStream on database")); + assumeFalse(testDescription.endsWith("createChangeStream on collection")); + + // No withTransaction support + assumeFalse(fileDescription.contains("withTransaction") || testDescription.contains("withTransaction")); + + if (testDescription.equals("timeoutMS is refreshed for close")) { + enableSleepAfterCursorError(256); + } + + /* + * The test is occasionally racy. The "killCursors" command may appear as an additional event. This is unexpected in unified tests, + * but anticipated in reactive streams because an operation timeout error triggers the closure of the stream/publisher. + */ + ignoreExtraCommandEvents(testDescription.contains("timeoutMS is refreshed for getMore - failure")); + + Hooks.onOperatorDebug(); + Hooks.onErrorDropped(atomicReferenceThrowable::set); + } + + @ParameterizedTest(name = "{0}: {1}") + @MethodSource("data") + @Override + public void shouldPassAllOutcomes( + @Nullable final String fileDescription, + @Nullable final String testDescription, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + try { + super.shouldPassAllOutcomes(fileDescription, + testDescription, + schemaVersion, + runOnRequirements, + entitiesArray, + initialData, + definition); + + } catch (AssertionError e) { + assertNoDroppedError(format("%s failed due to %s.\n" + + "The test also caused a dropped error; `onError` called with no handler.", + testDescription, e.getMessage())); + if (racyTestAssertion(testDescription, e)) { + // Ignore failure - the racy test often has no time to do the getMore + return; + } + throw e; + } + assertNoDroppedError(format("%s passed but there was a dropped error; `onError` called with no handler.", testDescription)); + } + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + TransportSettings overriddenTransportSettings = ClusterFixture.getOverriddenTransportSettings(); + MongoClientSettings clientSettings = overriddenTransportSettings == null ?
settings + : MongoClientSettings.builder(settings).transportSettings(overriddenTransportSettings).build(); + return new SyncMongoClient(MongoClients.create(clientSettings)); + } + + @AfterEach + public void cleanUp() { + super.cleanUp(); + disableSleep(); + Hooks.resetOnOperatorDebug(); + Hooks.resetOnErrorDropped(); + } + + public static boolean racyTestAssertion(final String testDescription, final AssertionError e) { + return RACY_GET_MORE_TESTS.contains(testDescription) && e.getMessage().startsWith("Number of events must be the same"); + } + + private static final List RACY_GET_MORE_TESTS = asList( + "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime", + "remaining timeoutMS applied to getMore if timeoutMode is unset"); + + private void assertNoDroppedError(final String message) { + Throwable droppedError = atomicReferenceThrowable.get(); + if (droppedError != null) { + throw new AssertionError(message, droppedError); + } + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java index 17fb4479e8c..cfbf5a0a5b8 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java @@ -42,7 +42,7 @@ import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -77,17 +77,17 @@ void shouldBuildTheExpectedOperation() { .collation(COLLATION) .comment("my comment") .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) - .maxTime(10, SECONDS); + .maxAwaitTime(1001, MILLISECONDS) + .maxTime(101, MILLISECONDS); - expectedOperation + expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) .allowDiskUse(true) .batchSize(100) .collation(COLLATION) .comment(new BsonString("my comment")) - .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) - .maxTime(10, SECONDS); + .hint(BsonDocument.parse("{a: 1}")); Flux.from(publisher).blockFirst(); assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); @@ -104,7 +104,7 @@ void shouldBuildTheExpectedOperationForHintString() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, - getDefaultCodecRegistry().get(Document.class)) + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -128,7 +128,7 @@ void shouldBuildTheExpectedOperationForHintPlusHintString() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, - getDefaultCodecRegistry().get(Document.class)) + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -156,8 +156,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() 
{ new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -174,16 +173,16 @@ void shouldBuildTheExpectedOperationsForDollarOut() { .collation(COLLATION) .comment("my comment") .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) // Ignored on $out - .maxTime(10, SECONDS); + .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out + .maxTime(100, MILLISECONDS); - expectedOperation + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED) .allowDiskUse(true) .bypassDocumentValidation(true) .collation(COLLATION) .comment(new BsonString("my comment")) - .hint(BsonDocument.parse("{a: 1}")) - .maxTime(10, SECONDS); + .hint(BsonDocument.parse("{a: 1}")); Flux.from(publisher).blockFirst(); assertEquals(ReadPreference.primary(), executor.getReadPreference()); @@ -195,8 +194,6 @@ void shouldBuildTheExpectedOperationsForDollarOut() { .batchSize(100) .collation(COLLATION) .filter(new BsonDocument()) - .maxAwaitTime(0, SECONDS) - .maxTime(0, SECONDS) .comment(new BsonString("my comment")) .retryReads(true); @@ -205,7 +202,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() { // Should handle database level aggregations publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); Flux.from(publisher).blockFirst(); operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation(); @@ -215,7 +213,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() { // Should handle toCollection publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher.toCollection()).blockFirst(); @@ -235,8 +234,7 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintString() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); publisher .hintString("x_1"); @@ -263,8 +261,7 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintPlusHintString() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); publisher .hint(new Document("x", 1)) @@ -296,8 
+293,8 @@ void shouldBuildTheExpectedOperationsForDollarOutAsDocument() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION) .toCollection(); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); Flux.from(toCollectionPublisher).blockFirst(); assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); @@ -337,8 +334,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -355,16 +351,16 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { .collation(COLLATION) .comment(new BsonInt32(1)) .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) // Ignored on $out - .maxTime(10, SECONDS); + .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out + .maxTime(100, MILLISECONDS); - expectedOperation + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED) .allowDiskUse(true) .bypassDocumentValidation(true) .collation(COLLATION) .comment(new BsonInt32(1)) - .hint(BsonDocument.parse("{a: 1}")) - .maxTime(10, SECONDS); + .hint(BsonDocument.parse("{a: 1}")); Flux.from(publisher).blockFirst(); assertEquals(ReadPreference.primary(), executor.getReadPreference()); @@ -376,8 +372,6 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { .batchSize(100) .collation(COLLATION) .filter(new BsonDocument()) - .maxAwaitTime(0, SECONDS) - .maxTime(0, SECONDS) .comment(new BsonInt32(1)) .retryReads(true); @@ -386,7 +380,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { // Should handle database level aggregations publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); Flux.from(publisher).blockFirst(); operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation(); @@ -396,7 +391,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { // Should handle toCollection publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher.toCollection()).blockFirst(); @@ -416,8 +412,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeString() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, 
AggregationLevel.COLLECTION); AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java index d8a0083173c..7c2ab637c27 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java @@ -40,7 +40,7 @@ import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -57,7 +57,8 @@ void shouldBuildTheExpectedOperation() { Document.class, pipeline, ChangeStreamLevel.COLLECTION); ChangeStreamOperation> expectedOperation = - new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, codec) + new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, + codec) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -72,16 +73,17 @@ void shouldBuildTheExpectedOperation() { .batchSize(100) .collation(COLLATION) .comment("comment") - .maxAwaitTime(20, SECONDS) + .maxAwaitTime(101, MILLISECONDS) .fullDocument(FullDocument.UPDATE_LOOKUP); - expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, - codec).retryReads(true); + expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, + pipeline, + codec).retryReads(true); expectedOperation .batchSize(100) .collation(COLLATION) - .comment(new BsonString("comment")) - .maxAwaitTime(20, SECONDS); + .comment(new BsonString("comment")); Flux.from(publisher).blockFirst(); assertEquals(ReadPreference.primary(), executor.getReadPreference()); @@ -103,7 +105,7 @@ void shouldBuildTheExpectedOperationWhenSettingDocumentClass() { ChangeStreamOperation expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, - getDefaultCodecRegistry().get(BsonDocument.class)) + getDefaultCodecRegistry().get(BsonDocument.class)) .batchSize(batchSize) .comment(new BsonInt32(1)) .retryReads(true); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy index 4879fa19466..d6233342291 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy @@ -23,7 +23,6 @@ import 
com.mongodb.async.FutureResultCallback import com.mongodb.connection.ServerConnectionState import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerType -import com.mongodb.internal.IgnorableRequestContext import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding import com.mongodb.internal.binding.AsyncClusterBinding import com.mongodb.internal.binding.AsyncConnectionSource @@ -34,15 +33,19 @@ import com.mongodb.internal.session.ClientSessionContext import com.mongodb.reactivestreams.client.ClientSession import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + class ClientSessionBindingSpecification extends Specification { def 'should return the session context from the binding'() { given: def session = Stub(ClientSession) - def wrappedBinding = Stub(AsyncClusterAwareReadWriteBinding) + def wrappedBinding = Stub(AsyncClusterAwareReadWriteBinding) { + getOperationContext() >> OPERATION_CONTEXT + } def binding = new ClientSessionBinding(session, false, wrappedBinding) when: - def context = binding.getSessionContext() + def context = binding.getOperationContext().getSessionContext() then: (context as ClientSessionContext).getClientSession() == session @@ -51,7 +54,9 @@ class ClientSessionBindingSpecification extends Specification { def 'should return the session context from the connection source'() { given: def session = Stub(ClientSession) - def wrappedBinding = Mock(AsyncClusterAwareReadWriteBinding) + def wrappedBinding = Mock(AsyncClusterAwareReadWriteBinding) { + getOperationContext() >> OPERATION_CONTEXT + } wrappedBinding.retain() >> wrappedBinding def binding = new ClientSessionBinding(session, false, wrappedBinding) @@ -65,7 +70,7 @@ class ClientSessionBindingSpecification extends Specification { } when: - def context = futureResultCallback.get().getSessionContext() + def context = futureResultCallback.get().getOperationContext().getSessionContext() then: (context as ClientSessionContext).getClientSession() == session @@ -80,7 +85,7 @@ class ClientSessionBindingSpecification extends Specification { } when: - context = futureResultCallback.get().getSessionContext() + context = futureResultCallback.get().getOperationContext().getSessionContext() then: (context as ClientSessionContext).getClientSession() == session @@ -166,7 +171,7 @@ class ClientSessionBindingSpecification extends Specification { def binding = new ClientSessionBinding(session, ownsSession, wrappedBinding) then: - binding.getSessionContext().isImplicitSession() == ownsSession + binding.getOperationContext().getSessionContext().isImplicitSession() == ownsSession where: ownsSession << [true, false] @@ -182,6 +187,6 @@ class ClientSessionBindingSpecification extends Specification { .build()), null) } } - new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE) + new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) } } diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java index 62a7596a681..eab28373f2a 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java @@ -34,7 +34,6 @@ import 
static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class FindPublisherImplTest extends TestHelper { @@ -50,7 +49,8 @@ void shouldBuildTheExpectedOperation() { TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); FindPublisher publisher = new FindPublisherImpl<>(null, createMongoOperationPublisher(executor), new Document()); - FindOperation expectedOperation = new FindOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + FindOperation expectedOperation = new FindOperation<>(NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true) .filter(new BsonDocument()); @@ -66,8 +66,8 @@ void shouldBuildTheExpectedOperation() { .filter(new Document("filter", 1)) .sort(Sorts.ascending("sort")) .projection(new Document("projection", 1)) - .maxTime(10, SECONDS) - .maxAwaitTime(20, SECONDS) + .maxTime(101, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) .batchSize(100) .limit(100) .skip(10) @@ -83,7 +83,10 @@ void shouldBuildTheExpectedOperation() { .showRecordId(false) .allowDiskUse(false); - expectedOperation + expectedOperation = new FindOperation<>(NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) + .filter(new BsonDocument()) .allowDiskUse(false) .batchSize(100) .collation(COLLATION) @@ -93,8 +96,6 @@ void shouldBuildTheExpectedOperation() { .hint(new BsonString("a_1")) .limit(100) .max(new BsonDocument("max", new BsonInt32(1))) - .maxAwaitTime(20000, MILLISECONDS) - .maxTime(10000, MILLISECONDS) .min(new BsonDocument("min", new BsonInt32(1))) .projection(new BsonDocument("projection", new BsonInt32(1))) .returnKey(false) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java index 36891f1031f..6613723b49d 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java @@ -35,6 +35,7 @@ final class ListCollectionNamesPublisherImplTest extends TestHelper { private static final String DATABASE_NAME = NAMESPACE.getDatabaseName(); + @SuppressWarnings("deprecation") @DisplayName("Should build the expected ListCollectionsOperation") @Test void shouldBuildTheExpectedOperation() { @@ -45,7 +46,7 @@ void shouldBuildTheExpectedOperation() { .authorizedCollections(true); ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, - getDefaultCodecRegistry().get(Document.class)) + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .nameOnly(true) .authorizedCollections(true) @@ -63,9 +64,12 @@ void shouldBuildTheExpectedOperation() { .maxTime(10, SECONDS) .batchSize(100); - expectedOperation + expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, + getDefaultCodecRegistry().get(Document.class)) + .nameOnly(true) + .authorizedCollections(true) + .retryReads(true) .filter(new BsonDocument("filter", new BsonInt32(1))) - .maxTime(10, SECONDS) 
.batchSize(100); Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java index c875ab7973c..a632edbae82 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java @@ -28,7 +28,7 @@ import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class ListCollectionsPublisherImplTest extends TestHelper { @@ -56,12 +56,14 @@ void shouldBuildTheExpectedOperation() { // Should apply settings publisher .filter(new Document("filter", 1)) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .batchSize(100); - expectedOperation + expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, + getDefaultCodecRegistry().get(String.class)) + .nameOnly(true) + .retryReads(true) .filter(new BsonDocument("filter", new BsonInt32(1))) - .maxTime(10, SECONDS) .batchSize(100); Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java index 749f11b8e0a..c19a56f14cc 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java @@ -28,7 +28,7 @@ import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class ListDatabasesPublisherImplTest extends TestHelper { @@ -41,7 +41,8 @@ void shouldBuildTheExpectedOperation() { TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); ListDatabasesPublisher publisher = new ListDatabasesPublisherImpl<>(null, createMongoOperationPublisher(executor)); - ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>(getDefaultCodecRegistry().get(Document.class)) + ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>( + getDefaultCodecRegistry().get(Document.class)) .retryReads(true); // default input should be as expected @@ -54,13 +55,14 @@ void shouldBuildTheExpectedOperation() { publisher .authorizedDatabasesOnly(true) .filter(new Document("filter", 1)) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .batchSize(100); - expectedOperation + expectedOperation = new ListDatabasesOperation<>( + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) .authorizedDatabasesOnly(true) - .filter(new BsonDocument("filter", new BsonInt32(1))) - .maxTime(10, SECONDS); + .filter(new BsonDocument("filter", new BsonInt32(1))); configureBatchCursor(); 
Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java index 1929c4c3476..5ae221b8a02 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java @@ -27,7 +27,7 @@ import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class ListIndexesPublisherImplTest extends TestHelper { @@ -54,13 +54,13 @@ void shouldBuildTheExpectedOperation() { assertEquals(ReadPreference.primary(), executor.getReadPreference()); // Should apply settings - publisher - .batchSize(100) - .maxTime(10, SECONDS); + publisher.batchSize(100) + .maxTime(100, MILLISECONDS); - expectedOperation - .batchSize(100) - .maxTime(10, SECONDS); + expectedOperation = + new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + .batchSize(100) + .retryReads(true); configureBatchCursor(); Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java index 451772e5751..c112395a818 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java @@ -36,7 +36,7 @@ import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -57,9 +57,9 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() { com.mongodb.reactivestreams.client.MapReducePublisher publisher = new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION); - MapReduceWithInlineResultsOperation expectedOperation = - new MapReduceWithInlineResultsOperation<>(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), - getDefaultCodecRegistry().get(Document.class)).verbose(true); + MapReduceWithInlineResultsOperation expectedOperation = new MapReduceWithInlineResultsOperation<>( + NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + getDefaultCodecRegistry().get(Document.class)).verbose(true); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -78,19 +78,19 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() { .filter(new Document("filter", 1)) .finalizeFunction(FINALIZE_FUNCTION) .limit(999) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document("scope", 1)) 
.sort(Sorts.ascending("sort")) .verbose(false); - expectedOperation - .collation(COLLATION) + expectedOperation = new MapReduceWithInlineResultsOperation<>( + NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + getDefaultCodecRegistry().get(Document.class)) + .verbose(true) .collation(COLLATION) .filter(BsonDocument.parse("{filter: 1}")) .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION)) .limit(999) - .maxTime(10, SECONDS) - .maxTime(10, SECONDS) .scope(new BsonDocument("scope", new BsonInt32(1))) .sort(new BsonDocument("sort", new BsonInt32(1))) .verbose(false); @@ -114,9 +114,7 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() { .collectionName(NAMESPACE.getCollectionName()); MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, - new BsonJavaScript(MAP_FUNCTION), - new BsonJavaScript(REDUCE_FUNCTION), - NAMESPACE.getCollectionName(), + new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED).verbose(true); // default input should be as expected @@ -131,19 +129,19 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() { .filter(new Document("filter", 1)) .finalizeFunction(FINALIZE_FUNCTION) .limit(999) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document("scope", 1)) .sort(Sorts.ascending("sort")) .verbose(false); - expectedOperation + expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), + new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED) + .verbose(true) .collation(COLLATION) .bypassDocumentValidation(true) .filter(BsonDocument.parse("{filter: 1}")) .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION)) .limit(999) - .maxTime(10, SECONDS) - .maxTime(10, SECONDS) .scope(new BsonDocument("scope", new BsonInt32(1))) .sort(new BsonDocument("sort", new BsonInt32(1))) .verbose(false); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java new file mode 100644 index 00000000000..b79d3a645d9 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java @@ -0,0 +1,237 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import com.mongodb.reactivestreams.client.MongoCluster; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + + +public class MongoClusterImplTest extends TestHelper { + + @Mock + private ClientSession clientSession; + + private final MongoClusterImpl mongoCluster = createMongoCluster(); + private final MongoOperationPublisher mongoOperationPublisher = mongoCluster.getMongoOperationPublisher(); + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec()); + MongoCluster newMongoCluster = mongoCluster.withCodecRegistry(codecRegistry); + assertTrue(newMongoCluster.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withReadConcern() { + assertEquals(ReadConcern.AVAILABLE, mongoCluster.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(ReadPreference.secondaryPreferred(), mongoCluster.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(1000, mongoCluster.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(WriteConcern.MAJORITY, mongoCluster.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + + @Test + void testListDatabases() { + assertAll("listDatabases", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabases((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabases((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> mongoCluster.listDatabases(clientSession, null))), + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(), "Default"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, 
mongoCluster.listDatabases(clientSession), "With session"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher + .withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(BsonDocument.class), "Alternative class"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher + .withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(clientSession, BsonDocument.class), + "Alternative class with session"); + } + ); + } + + @Test + void testListDatabaseNames() { + assertAll("listDatabaseNames", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabaseNames(null))), + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher).nameOnly(true); + + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabaseNames(), "Default"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher).nameOnly(true); + + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabaseNames(clientSession), "With session"); + } + ); + } + + @Test + void testWatch() { + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + assertAll("watch", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((List) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch(pipeline, null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch(null, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> mongoCluster.watch(null, pipeline, Document.class)) + ), + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + Document.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(), "Default"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + Document.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(pipeline), "With pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(BsonDocument.class), + "With result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(pipeline, BsonDocument.class), + "With pipeline & result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + Document.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession), "with 
session"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + Document.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, pipeline), "With session & pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, BsonDocument.class), + "With session & resultClass"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, pipeline, BsonDocument.class), + "With clientSession, pipeline & result class"); + } + ); + } + + @Test + void testStartSession() { + MongoClusterImpl mongoCluster = createMongoCluster(); + + // Validation + assertThrows(IllegalArgumentException.class, () -> mongoCluster.startSession(null)); + + // Default + Mono expected = mongoCluster.getClientSessionHelper() + .createClientSessionMono(ClientSessionOptions.builder().build(), OPERATION_EXECUTOR); + assertPublisherIsTheSameAs(expected, mongoCluster.startSession(), "Default"); + + // with options + ClientSessionOptions options = ClientSessionOptions.builder() + .causallyConsistent(true) + .defaultTransactionOptions(TransactionOptions.builder().readConcern(ReadConcern.LINEARIZABLE).build()) + .build(); + expected = mongoCluster.getClientSessionHelper().createClientSessionMono(options, OPERATION_EXECUTOR); + assertPublisherIsTheSameAs(expected, mongoCluster.startSession(options), "with options"); + + } + + private MongoClusterImpl createMongoCluster() { + return new MongoClusterImpl(mock(Cluster.class), null, OPERATION_EXECUTOR, mock(ServerSessionPool.class), + mock(ClientSessionHelper.class), OPERATION_PUBLISHER); + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java index 1cd31102611..97b7bbf0d78 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java @@ -18,6 +18,9 @@ import com.mongodb.CreateIndexCommitQuorum; import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.client.model.BulkWriteOptions; import com.mongodb.client.model.Collation; @@ -52,8 +55,11 @@ import com.mongodb.reactivestreams.client.DistinctPublisher; import com.mongodb.reactivestreams.client.FindPublisher; import com.mongodb.reactivestreams.client.ListIndexesPublisher; +import com.mongodb.reactivestreams.client.MongoCollection; import org.bson.BsonDocument; import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.junit.jupiter.api.Test; import org.mockito.Mock; @@ -65,7 +71,9 @@ import static java.util.Collections.emptyList; 
import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class MongoCollectionImplTest extends TestHelper { @@ -80,6 +88,40 @@ public class MongoCollectionImplTest extends TestHelper { private final List pipeline = singletonList(filter); private final Collation collation = Collation.builder().locale("de").build(); + @Test + public void withDocumentClass() { + assertEquals(BsonDocument.class, collection.withDocumentClass(BsonDocument.class).getDocumentClass()); + } + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec()); + MongoCollection newCollection = collection.withCodecRegistry(codecRegistry); + assertTrue(newCollection.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withReadConcern() { + assertEquals(ReadConcern.AVAILABLE, collection.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(ReadPreference.secondaryPreferred(), collection.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(1000, collection.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(WriteConcern.MAJORITY, collection.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + @Test void testAggregate() { assertAll("Aggregate tests", diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java index 77be004edda..f50e44a7db6 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java @@ -16,7 +16,9 @@ package com.mongodb.reactivestreams.client.internal; +import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateViewOptions; @@ -27,19 +29,25 @@ import com.mongodb.reactivestreams.client.ClientSession; import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; import com.mongodb.reactivestreams.client.ListCollectionsPublisher; +import com.mongodb.reactivestreams.client.MongoDatabase; import org.bson.BsonDocument; import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.junit.jupiter.api.Test; import org.mockito.Mock; import org.reactivestreams.Publisher; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static 
org.junit.jupiter.api.Assertions.assertTrue; public class MongoDatabaseImplTest extends TestHelper { @@ -49,6 +57,35 @@ public class MongoDatabaseImplTest extends TestHelper { private final MongoDatabaseImpl database = new MongoDatabaseImpl(OPERATION_PUBLISHER.withDatabase("db")); private final MongoOperationPublisher mongoOperationPublisher = database.getMongoOperationPublisher(); + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec()); + MongoDatabase newDatabase = database.withCodecRegistry(codecRegistry); + assertTrue(newDatabase.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withReadConcern() { + assertEquals(ReadConcern.AVAILABLE, database.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(ReadPreference.secondaryPreferred(), database.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(1000, database.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(WriteConcern.MAJORITY, database.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + @Test void testAggregate() { List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java new file mode 100644 index 00000000000..42d6bb14c5c --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java @@ -0,0 +1,127 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.reactivestreams.client.internal; + + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; + + +public class MongoOperationPublisherTest { + + private static final OperationExecutor OPERATION_EXECUTOR; + + static { + OPERATION_EXECUTOR = mock(OperationExecutor.class); + Mockito.lenient().doAnswer(invocation -> OPERATION_EXECUTOR) + .when(OPERATION_EXECUTOR) + .withTimeoutSettings(any()); + } + private static final MongoNamespace MONGO_NAMESPACE = new MongoNamespace("a.b"); + + private static final MongoOperationPublisher DEFAULT_MOP = new MongoOperationPublisher<>( + MONGO_NAMESPACE, Document.class, MongoClientSettings.getDefaultCodecRegistry(), ReadPreference.primary(), + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, true, UuidRepresentation.STANDARD, + null, TIMEOUT_SETTINGS_WITH_TIMEOUT, OPERATION_EXECUTOR); + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = DEFAULT_MOP.withCodecRegistry(CodecRegistries.fromCodecs(new TestHelper.MyLongCodec())).getCodecRegistry(); + assertTrue(codecRegistry.get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withDatabase() { + assertEquals(new MongoNamespace("c.ignored"), DEFAULT_MOP.withDatabase("c").getNamespace()); + } + + @Test + public void withDocumentClass() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withDocumentClass(Document.class)); + assertEquals(BsonDocument.class, DEFAULT_MOP.withDocumentClass(BsonDocument.class).getDocumentClass()); + } + + @Test + public void withDatabaseAndDocumentClass() { + MongoOperationPublisher alternative = DEFAULT_MOP.withDatabaseAndDocumentClass("c", BsonDocument.class); + assertEquals(BsonDocument.class, alternative.getDocumentClass()); + assertEquals(new MongoNamespace("c.ignored"), alternative.getNamespace()); + } + + @Test + public void withNamespaceAndDocumentClass() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class)); + + MongoOperationPublisher alternative = DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("c.d"), + BsonDocument.class); + assertEquals(BsonDocument.class, alternative.getDocumentClass()); + assertEquals(new MongoNamespace("c.d"), alternative.getNamespace()); + } + + + @Test + public void withNamespace() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class)); + assertEquals(new MongoNamespace("c.d"), DEFAULT_MOP.withNamespace(new MongoNamespace("c.d")).getNamespace()); + } + + @Test + public void withReadConcern() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadConcern(ReadConcern.DEFAULT)); + assertEquals(ReadConcern.AVAILABLE, 
DEFAULT_MOP.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadPreference(ReadPreference.primary())); + assertEquals(ReadPreference.secondaryPreferred(), DEFAULT_MOP.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withTimeout(60_000, TimeUnit.MILLISECONDS)); + assertEquals(1000, DEFAULT_MOP.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeoutMS()); + assertThrows(IllegalArgumentException.class, () -> DEFAULT_MOP.withTimeout(500, TimeUnit.NANOSECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withWriteConcern(WriteConcern.ACKNOWLEDGED)); + assertEquals(WriteConcern.MAJORITY, DEFAULT_MOP.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java index c293df899b4..46f4e86762b 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java @@ -30,9 +30,14 @@ import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.lang.NonNull; import com.mongodb.lang.Nullable; +import org.bson.BsonReader; +import org.bson.BsonWriter; import org.bson.Document; import org.bson.UuidRepresentation; import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; import org.bson.codecs.configuration.CodecRegistry; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; @@ -52,8 +57,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.stream.Collectors.toList; import static java.util.stream.Collectors.toMap; import static org.bson.codecs.configuration.CodecRegistries.fromProviders; @@ -81,6 +88,9 @@ public class TestHelper { static { OperationExecutor executor = mock(OperationExecutor.class); + Mockito.lenient().doAnswer(invocation -> executor) + .when(executor).withTimeoutSettings(any()); + Mockito.lenient().doAnswer(invocation -> Mono.empty()) .when(executor) .execute(any(), any(), any()); @@ -97,7 +107,7 @@ static MongoOperationPublisher createMongoOperationPublisher(final Ope return new MongoOperationPublisher<>(NAMESPACE, Document.class, getDefaultCodecRegistry(), ReadPreference.primary(), ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, true, - UuidRepresentation.STANDARD, null, executor); + UuidRepresentation.STANDARD, null, TIMEOUT_SETTINGS, executor); } @@ -148,7 +158,10 @@ private static Map getClassGetterValues(final Object instance) { } - private static Map> getClassPrivateFieldValues(final Object instance) { + private static Map> getClassPrivateFieldValues(@Nullable final Object instance) { + if (instance == null) { + return emptyMap(); + } return Arrays.stream(instance.getClass().getDeclaredFields()) .filter(field -> 
Modifier.isPrivate(field.getModifiers())) .collect(toMap(Field::getName, field -> { @@ -264,4 +277,21 @@ void configureBatchCursor() { public AsyncBatchCursor getBatchCursor() { return batchCursor; } + + public static class MyLongCodec implements Codec { + + @Override + public Long decode(final BsonReader reader, final DecoderContext decoderContext) { + return 42L; + } + + @Override + public void encode(final BsonWriter writer, final Long value, final EncoderContext encoderContext) { + } + + @Override + public Class getEncoderClass() { + return Long.class; + } + } } diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java index 99c9642f8d6..6989d0b2d2e 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java @@ -18,6 +18,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.lang.Nullable; @@ -59,6 +60,16 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon return createMono(); } + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) { + return this; + } + + @Override + public TimeoutSettings getTimeoutSettings() { + throw new UnsupportedOperationException("Not supported"); + } + Mono createMono() { return Mono.create(sink -> { Object response = responses.remove(0); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java new file mode 100644 index 00000000000..01924c61f0e --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java @@ -0,0 +1,233 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.time.Timeout; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutMono; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.longThat; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +class TimeoutHelperTest { + + private static final String TIMEOUT_ERROR_MESSAGE = "message"; + private static final String DEFAULT_TIMEOUT_ERROR_MESSAGE = "Operation exceeded the timeout limit."; + + @Test + void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsNull() { + //given + MongoCollection collection = mock(MongoCollection.class); + + //when + MongoCollection result = collectionWithTimeout(collection, null); + MongoCollection monoResult = collectionWithTimeoutMono(collection, null).block(); + MongoCollection monoResultDeferred = collectionWithTimeoutDeferred(collection, null).block(); + + //then + assertEquals(collection, result); + assertEquals(collection, monoResult); + assertEquals(collection, monoResultDeferred); + } + + @Test + void shouldNotSetRemainingTimeoutDatabaseWhenTimeoutIsNull() { + //given + MongoDatabase database = mock(MongoDatabase.class); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, null); + MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, null).block(); + + //then + assertEquals(database, result); + assertEquals(database, monoResultDeferred); + } + + @Test + void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsInfinite() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + + //when + MongoCollection result = collectionWithTimeout(collection, Timeout.infinite()); + MongoCollection monoResult = collectionWithTimeoutMono(collection, Timeout.infinite()).block(); + MongoCollection monoResultDeferred = collectionWithTimeoutDeferred(collection, Timeout.infinite()).block(); + + //then + assertEquals(collectionWithTimeout, result); + assertEquals(collectionWithTimeout, monoResult); + assertEquals(collectionWithTimeout, monoResultDeferred); + 
verify(collection, times(3)) + .withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldNotSetRemainingTimeoutOnDatabaseWhenTimeoutIsInfinite() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()); + MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()).block(); + + //then + assertEquals(databaseWithTimeout, result); + assertEquals(databaseWithTimeout, monoResultDeferred); + verify(database, times(2)) + .withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldSetRemainingTimeoutOnCollectionWhenTimeout() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoCollection result = collectionWithTimeout(collection, timeout); + MongoCollection monoResult = collectionWithTimeoutMono(collection, timeout).block(); + MongoCollection monoResultDeferred = collectionWithTimeoutDeferred(collection, timeout).block(); + + //then + verify(collection, times(3)) + .withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(collectionWithTimeout, result); + assertEquals(collectionWithTimeout, monoResult); + assertEquals(collectionWithTimeout, monoResultDeferred); + } + + @Test + void shouldSetRemainingTimeoutOnDatabaseWhenTimeout() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout); + MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout).block(); + + //then + verify(database, times(2)) + .withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(databaseWithTimeout, result); + assertEquals(databaseWithTimeout, monoResultDeferred); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, timeout)); + MongoOperationTimeoutException mongoExecutionTimeoutExceptionMono = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutMono(collection, timeout).block()); + MongoOperationTimeoutException mongoExecutionTimeoutExceptionDeferred = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutDeferred(collection, timeout).block()); + + //then + assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, 
mongoExecutionTimeoutExceptionMono.getMessage()); + assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionDeferred.getMessage()); + verifyNoInteractions(collection); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout)); + MongoOperationTimeoutException mongoExecutionTimeoutExceptionDeferred = + assertThrows(MongoOperationTimeoutException.class, + () -> databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout) + .block()); + + //then + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionDeferred.getMessage()); + verifyNoInteractions(database); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, timeout)); + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutMono(collection, timeout).block()); + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutDeferred(collection, timeout).block()); + + //then + + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout)); + assertThrows(MongoOperationTimeoutException.class, + () -> databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout).block()); + + //then + verifyNoInteractions(database); + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java new file mode 100644 index 00000000000..38d19647fd7 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.Fixture; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.event.CommandEvent; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.reactivestreams.client.TestEventPublisher; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.TestSubscriber; +import com.mongodb.reactivestreams.client.gridfs.GridFSBucket; +import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscription; +import reactor.core.publisher.Flux; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +class GridFSUploadPublisherTest { + private static final String GRID_FS_BUCKET_NAME = "db.fs"; + private TestCommandListener commandListener; + + protected MongoClientSettings.Builder getMongoClientSettingsBuilder() { + commandListener.reset(); + return Fixture.getMongoClientSettingsBuilder() + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .readPreference(ReadPreference.primary()) + .addCommandListener(commandListener); + } + + @Test + void shouldTimeoutWhenSourcePublisherCompletionExceedsOverallOperationTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + //given + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .timeout(rtt + 800, TimeUnit.MILLISECONDS).build())) { + MongoDatabase database = client.getDatabase(getDefaultDatabaseName()); + GridFSBucket gridFsBucket = GridFSBuckets.create(database, GRID_FS_BUCKET_NAME); + + TestEventPublisher eventPublisher = new TestEventPublisher<>(); + TestSubscriber testSubscriber = new TestSubscriber<>(); + + //when + gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream()) + .subscribe(testSubscriber); + testSubscriber.requestMore(1); + + //then + testSubscriber.assertTerminalEvent(); + + List onErrorEvents = testSubscriber.getOnErrorEvents(); + assertEquals(1, onErrorEvents.size()); + + Throwable throwable = onErrorEvents.get(0); + assertEquals(MongoOperationTimeoutException.class, throwable.getClass()); + assertEquals("GridFS waiting for data from the source Publisher exceeded the timeout limit.", throwable.getMessage()); + + //assert no chunk has been inserted as we have not sent any data from source publisher. 
+ for (CommandEvent event : commandListener.getEvents()) { + assertNotEquals("insert", event.getCommandName()); + } + } + } + + @Test + void shouldCancelSubscriptionToSourceWhenOperationTimeoutOccurs() throws Exception { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + //given + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .timeout(rtt + 1000, TimeUnit.MILLISECONDS).build())) { + MongoDatabase database = client.getDatabase(getDefaultDatabaseName()); + GridFSBucket gridFsBucket = GridFSBuckets.create(database, GRID_FS_BUCKET_NAME); + + TestEventPublisher testEventPublisher = new TestEventPublisher<>(); + CompletableFuture subscriptionSignal = new CompletableFuture<>(); + Flux eventStream = testEventPublisher.getEventStream().doOnSubscribe(subscriptionSignal::complete); + TestSubscriber testSubscriber = new TestSubscriber<>(); + + //when + gridFsBucket.uploadFromPublisher("filename", eventStream) + .subscribe(testSubscriber); + testSubscriber.requestMore(1); + + //then + subscriptionSignal.get(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS); + assertEquals(1, testEventPublisher.currentSubscriberCount()); + //We wait for timeout to occur here + testSubscriber.assertTerminalEvent(); + assertEquals(0, testEventPublisher.currentSubscriberCount()); + } + } + + @BeforeEach + public void setUp() { + commandListener = new TestCommandListener(); + } + + @AfterEach + public void tearDown() { + CollectionHelper.dropDatabase(getDefaultDatabaseName()); + } +} diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala b/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala index 93ab4bca823..192cf1ee912 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala @@ -39,7 +39,8 @@ class ClientSideEncryptionTest( mongoClient.getDatabase(databaseName) @After - def cleanUp(): Unit = { + override def cleanUp(): Unit = { + super.cleanUp() if (mongoClient != null) mongoClient.close() } } diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala index 35c6d88defb..d9cec1ede39 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala @@ -17,6 +17,7 @@ package org.mongodb.scala.syncadapter import com.mongodb.ExplainVerbosity import com.mongodb.client.AggregateIterable +import org.mongodb.scala.TimeoutMode import com.mongodb.client.model.Collation import org.bson.conversions.Bson import org.bson.{ BsonValue, Document } @@ -42,6 +43,11 @@ case class SyncAggregateIterable[T](wrapped: AggregateObservable[T]) this } + override def timeoutMode(timeoutMode: TimeoutMode): AggregateIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def maxTime(maxTime: Long, timeUnit: TimeUnit): AggregateIterable[T] = { wrapped.maxTime(maxTime, timeUnit) this @@ -102,5 +108,4 @@ case class SyncAggregateIterable[T](wrapped: AggregateObservable[T]) .explain[E](verbosity)(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass)) .toFuture() .get() - } diff --git 
a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala index 47687911bad..a517d027cd2 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala @@ -16,12 +16,13 @@ package org.mongodb.scala.syncadapter +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.Collation import com.mongodb.client.model.changestream.{ ChangeStreamDocument, FullDocument, FullDocumentBeforeChange } import com.mongodb.client.{ ChangeStreamIterable, MongoChangeStreamCursor } import com.mongodb.{ ServerAddress, ServerCursor } import org.bson.{ BsonDocument, BsonTimestamp, BsonValue } -import org.mongodb.scala.ChangeStreamObservable +import org.mongodb.scala.{ ChangeStreamObservable, TimeoutMode } import java.util.concurrent.TimeUnit diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala index 38a9618a281..2866ce7427d 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala @@ -18,6 +18,7 @@ package org.mongodb.scala.syncadapter import com.mongodb.{ ClientSessionOptions, MongoInterruptedException, ServerAddress, TransactionOptions } import com.mongodb.client.{ ClientSession => JClientSession, TransactionBody } +import com.mongodb.internal.TimeoutContext import com.mongodb.session.ServerSession import org.bson.{ BsonDocument, BsonTimestamp } import org.mongodb.scala._ @@ -93,4 +94,6 @@ case class SyncClientSession(wrapped: ClientSession, originator: Object) extends throw new MongoInterruptedException(null, e) } } + + override def getTimeoutContext: TimeoutContext = wrapped.getTimeoutContext } diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala index 5f007071db3..b105ac0897c 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala @@ -20,7 +20,7 @@ import com.mongodb.client.DistinctIterable import com.mongodb.client.model.Collation import org.bson.BsonValue import org.bson.conversions.Bson -import org.mongodb.scala.DistinctObservable +import org.mongodb.scala.{ DistinctObservable, TimeoutMode } import java.util.concurrent.TimeUnit @@ -42,6 +42,11 @@ case class SyncDistinctIterable[T](wrapped: DistinctObservable[T]) this } + override def timeoutMode(timeoutMode: TimeoutMode): DistinctIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def collation(collation: Collation): DistinctIterable[T] = { wrapped.collation(collation) this diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala index e66f70913b6..505241ab39a 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala +++ 
b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala @@ -21,9 +21,9 @@ import com.mongodb.client.model.Collation import com.mongodb.{ CursorType, ExplainVerbosity } import org.bson.Document import org.bson.conversions.Bson -import org.mongodb.scala.FindObservable import org.mongodb.scala.bson.BsonValue import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.{ FindObservable, TimeoutMode } import java.util.concurrent.TimeUnit import scala.reflect.ClassTag @@ -84,6 +84,11 @@ case class SyncFindIterable[T](wrapped: FindObservable[T]) extends SyncMongoIter this } + override def timeoutMode(timeoutMode: TimeoutMode): FindIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def collation(collation: Collation): FindIterable[T] = { wrapped.collation(collation) this diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala index 08fac0c9bb3..aa121ae99cf 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala @@ -17,8 +17,8 @@ package org.mongodb.scala.syncadapter import com.mongodb.client.ListCollectionsIterable import org.bson.conversions.Bson -import org.mongodb.scala.ListCollectionsObservable import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.{ ListCollectionsObservable, TimeoutMode } import java.util.concurrent.TimeUnit @@ -40,6 +40,11 @@ case class SyncListCollectionsIterable[T](wrapped: ListCollectionsObservable[T]) this } + override def timeoutMode(timeoutMode: TimeoutMode): ListCollectionsIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def comment(comment: String): ListCollectionsIterable[T] = { wrapped.comment(comment) this diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala index 0b5c82d1fc0..aa841c1be0a 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala @@ -2,8 +2,8 @@ package org.mongodb.scala.syncadapter import com.mongodb.client.ListDatabasesIterable import org.bson.conversions.Bson -import org.mongodb.scala.ListDatabasesObservable import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.{ ListDatabasesObservable, TimeoutMode } import java.util.concurrent.TimeUnit @@ -20,6 +20,11 @@ case class SyncListDatabasesIterable[T](wrapped: ListDatabasesObservable[T]) this } + override def timeoutMode(timeoutMode: TimeoutMode): ListDatabasesIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def filter(filter: Bson): ListDatabasesIterable[T] = { wrapped.filter(filter) this diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala index 22194de53aa..86db80bc6e4 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala @@ -17,8 
+17,8 @@ package org.mongodb.scala.syncadapter import com.mongodb.client.ListIndexesIterable -import org.mongodb.scala.ListIndexesObservable import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.{ ListIndexesObservable, TimeoutMode } import java.util.concurrent.TimeUnit @@ -35,6 +35,11 @@ case class SyncListIndexesIterable[T](wrapped: ListIndexesObservable[T]) this } + override def timeoutMode(timeoutMode: TimeoutMode): ListIndexesIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def comment(comment: String): ListIndexesIterable[T] = { wrapped.comment(comment) this diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala index 6fb7a6d2199..672b97aff9e 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala @@ -20,8 +20,8 @@ import com.mongodb.ExplainVerbosity import com.mongodb.client.ListSearchIndexesIterable import com.mongodb.client.model.Collation import org.bson.{ BsonValue, Document } -import org.mongodb.scala.ListSearchIndexesObservable import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.{ ListSearchIndexesObservable, TimeoutMode } import java.util.concurrent.TimeUnit import scala.reflect.ClassTag @@ -45,6 +45,11 @@ case class SyncListSearchIndexesIterable[T](wrapped: ListSearchIndexesObservable this } + override def timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def maxTime(maxTime: Long, timeUnit: TimeUnit): ListSearchIndexesIterable[T] = { wrapped.maxTime(maxTime, timeUnit) this diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala index 6fce83ffa4b..73af2f6f62a 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala @@ -16,12 +16,12 @@ package org.mongodb.scala.syncadapter -import java.util.concurrent.TimeUnit - import com.mongodb.client.MapReduceIterable import com.mongodb.client.model.{ Collation, MapReduceAction } import org.bson.conversions.Bson -import org.mongodb.scala.MapReduceObservable +import org.mongodb.scala.{ MapReduceObservable, TimeoutMode } + +import java.util.concurrent.TimeUnit case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T]) extends SyncMongoIterable[T] @@ -88,6 +88,11 @@ case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T]) this } + override def timeoutMode(timeoutMode: TimeoutMode): MapReduceIterable[T] = { + wrapped.timeoutMode(timeoutMode) + this + } + override def bypassDocumentValidation(bypassDocumentValidation: java.lang.Boolean): MapReduceIterable[T] = { wrapped.bypassDocumentValidation(bypassDocumentValidation) this @@ -97,4 +102,5 @@ case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T]) wrapped.collation(collation) this } + } diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala index 9bb1ec9d6d8..4daa6d94ef1 100644 
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala @@ -11,84 +11,10 @@ import scala.collection.JavaConverters._ import scala.concurrent.Await import scala.reflect.ClassTag -case class SyncMongoClient(wrapped: MongoClient) extends JMongoClient { - - override def getDatabase(databaseName: String): JMongoDatabase = - SyncMongoDatabase(wrapped.getDatabase(databaseName)) - - override def startSession: ClientSession = - SyncClientSession(Await.result(wrapped.startSession().head(), WAIT_DURATION), this) - - override def startSession(options: ClientSessionOptions): ClientSession = - SyncClientSession(Await.result(wrapped.startSession(options).head(), WAIT_DURATION), this) +case class SyncMongoClient(wrapped: MongoClient) extends SyncMongoCluster(wrapped) with JMongoClient { override def close(): Unit = wrapped.close() - override def listDatabaseNames = throw new UnsupportedOperationException - - override def listDatabaseNames(clientSession: ClientSession) = throw new UnsupportedOperationException - - override def listDatabases = new SyncListDatabasesIterable[Document](wrapped.listDatabases[Document]()) - - override def listDatabases(clientSession: ClientSession) = throw new UnsupportedOperationException - - override def listDatabases[TResult](resultClass: Class[TResult]) = - new SyncListDatabasesIterable[TResult]( - wrapped.listDatabases[TResult]()( - DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], - ClassTag(resultClass) - ) - ) - - override def listDatabases[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = - throw new UnsupportedOperationException - - override def watch = new SyncChangeStreamIterable[Document](wrapped.watch[Document]()) - - override def watch[TResult](resultClass: Class[TResult]) = - new SyncChangeStreamIterable[TResult]( - wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass)) - ) - - override def watch(pipeline: java.util.List[_ <: Bson]) = - new SyncChangeStreamIterable[Document](wrapped.watch[Document](pipeline.asScala.toSeq)) - - override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = - new SyncChangeStreamIterable[TResult]( - wrapped.watch[TResult](pipeline.asScala.toSeq)( - DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], - ClassTag(resultClass) - ) - ) - - override def watch(clientSession: ClientSession) = - new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession))) - - override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = - new SyncChangeStreamIterable[TResult]( - wrapped.watch(unwrap(clientSession))( - DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], - ClassTag(resultClass) - ) - ) - - override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) = - new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession), pipeline.asScala.toSeq)) - - override def watch[TResult]( - clientSession: ClientSession, - pipeline: java.util.List[_ <: Bson], - resultClass: Class[TResult] - ) = - new SyncChangeStreamIterable[TResult]( - wrapped.watch[TResult](unwrap(clientSession), pipeline.asScala.toSeq)( - DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], - ClassTag(resultClass) - ) - ) - override def getClusterDescription = throw new UnsupportedOperationException - 
private def unwrap(clientSession: ClientSession): org.mongodb.scala.ClientSession = - clientSession.asInstanceOf[SyncClientSession].wrapped } diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala new file mode 100644 index 00000000000..3871aded144 --- /dev/null +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala @@ -0,0 +1,126 @@ +package org.mongodb.scala.syncadapter + +import com.mongodb.{ ClientSessionOptions, ReadConcern, ReadPreference, WriteConcern } +import com.mongodb.client.{ ClientSession, MongoCluster => JMongoCluster, MongoDatabase => JMongoDatabase } +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson +import org.mongodb.scala.MongoCluster +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo + +import java.util.concurrent.TimeUnit +import scala.collection.JavaConverters._ +import scala.concurrent.Await +import scala.concurrent.duration.Duration +import scala.reflect.ClassTag + +object SyncMongoCluster { + + def apply(wrapped: MongoCluster): SyncMongoCluster = new SyncMongoCluster(wrapped) +} + +class SyncMongoCluster(wrapped: MongoCluster) extends JMongoCluster { + + override def getCodecRegistry: CodecRegistry = wrapped.codecRegistry + + override def getReadPreference: ReadPreference = wrapped.readPreference + + override def getWriteConcern: WriteConcern = wrapped.writeConcern + + override def getReadConcern: ReadConcern = wrapped.readConcern + + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + val timeout = wrapped.timeout.map(d => timeUnit.convert(d.toMillis, TimeUnit.MILLISECONDS)) + if (timeout.isDefined) timeout.get else null + } + + override def withCodecRegistry(codecRegistry: CodecRegistry): JMongoCluster = + SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + override def withReadPreference(readPreference: ReadPreference): JMongoCluster = + SyncMongoCluster(wrapped.withReadPreference(readPreference)) + + override def withWriteConcern(writeConcern: WriteConcern): JMongoCluster = + SyncMongoCluster(wrapped.withWriteConcern(writeConcern)) + + override def withReadConcern(readConcern: ReadConcern): JMongoCluster = + SyncMongoCluster(wrapped.withReadConcern(readConcern)) + + override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCluster = + SyncMongoCluster(wrapped.withTimeout(Duration(timeout, timeUnit))) + + override def getDatabase(databaseName: String): JMongoDatabase = + SyncMongoDatabase(wrapped.getDatabase(databaseName)) + + override def startSession: ClientSession = + SyncClientSession(Await.result(wrapped.startSession().head(), WAIT_DURATION), this) + + override def startSession(options: ClientSessionOptions): ClientSession = + SyncClientSession(Await.result(wrapped.startSession(options).head(), WAIT_DURATION), this) + + override def listDatabaseNames = throw new UnsupportedOperationException + + override def listDatabaseNames(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listDatabases = new SyncListDatabasesIterable[Document](wrapped.listDatabases[Document]()) + + override def listDatabases(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listDatabases[TResult](resultClass: Class[TResult]) = + new SyncListDatabasesIterable[TResult]( + wrapped.listDatabases[TResult]()( + 
DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def listDatabases[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + throw new UnsupportedOperationException + + override def watch = new SyncChangeStreamIterable[Document](wrapped.watch[Document]()) + + override def watch[TResult](resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass)) + ) + + override def watch(pipeline: java.util.List[_ <: Bson]) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](pipeline.asScala.toSeq)) + + override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def watch(clientSession: ClientSession) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession))) + + override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch(unwrap(clientSession))( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession), pipeline.asScala.toSeq)) + + override def watch[TResult]( + clientSession: ClientSession, + pipeline: java.util.List[_ <: Bson], + resultClass: Class[TResult] + ) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](unwrap(clientSession), pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + private def unwrap(clientSession: ClientSession): org.mongodb.scala.ClientSession = + clientSession.asInstanceOf[SyncClientSession].wrapped +} diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala index 380c6d272f3..7d97d794c42 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala @@ -34,7 +34,9 @@ import org.mongodb.scala.bson.DefaultHelper.DefaultsTo import org.mongodb.scala.result.{ InsertManyResult, InsertOneResult } import java.util +import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } import scala.reflect.ClassTag case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCollection[T] { @@ -53,6 +55,13 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol override def getReadConcern: ReadConcern = wrapped.readConcern + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + wrapped.timeout match { + case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS) + case None => null + } + } + override def withDocumentClass[NewTDocument](clazz: Class[NewTDocument]): JMongoCollection[NewTDocument] = SyncMongoCollection[NewTDocument]( wrapped.withDocumentClass[NewTDocument]()( @@ -73,6 +82,9 @@ case class 
SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol override def withReadConcern(readConcern: ReadConcern): JMongoCollection[T] = SyncMongoCollection[T](wrapped.withReadConcern(readConcern)) + override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withTimeout(Duration(timeout, timeUnit))) + override def countDocuments: Long = wrapped.countDocuments().toFuture().get() override def countDocuments(filter: Bson): Long = wrapped.countDocuments(filter).toFuture().get() @@ -556,7 +568,7 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol override def createSearchIndex(definition: Bson): String = wrapped.createSearchIndex(definition).toFuture().get() - override def createSearchIndexes(searchIndexModels: util.List[SearchIndexModel]): util.List[String] = + override def createSearchIndexes(searchIndexModels: java.util.List[SearchIndexModel]): java.util.List[String] = wrapped.createSearchIndexes(searchIndexModels.asScala.toList).toFuture().get().asJava override def updateSearchIndex(indexName: String, definition: Bson): Unit = diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala index 036d5589957..548289fd938 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala @@ -24,7 +24,9 @@ import org.bson.conversions.Bson import org.mongodb.scala.MongoDatabase import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ +import scala.concurrent.duration.MILLISECONDS import scala.reflect.ClassTag case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { @@ -39,6 +41,13 @@ case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { override def getReadConcern: ReadConcern = wrapped.readConcern + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + wrapped.timeout match { + case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS) + case None => null + } + } + override def withCodecRegistry(codecRegistry: CodecRegistry) = SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) @@ -48,6 +57,8 @@ case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { override def withReadConcern(readConcern: ReadConcern) = throw new UnsupportedOperationException + override def withTimeout(timeout: Long, timeUnit: TimeUnit) = throw new UnsupportedOperationException + override def getCollection(collectionName: String) = SyncMongoCollection[Document](wrapped.getCollection(collectionName)) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala index 20d5db9fd64..1a360c1a7c1 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala @@ -17,6 +17,7 @@ package org.mongodb.scala import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.{ Alpha, Reason } import java.util.concurrent.TimeUnit import com.mongodb.reactivestreams.client.AggregatePublisher @@ -198,6 +199,28 @@ case class AggregateObservable[TResult](private val wrapped: AggregatePublisher[ */ def toCollection(): 
SingleObservable[Unit] = wrapped.toCollection() + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * If the `timeout` is set then: + * + * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME` + * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it is an error + * to configure it as `TimeoutMode.CURSOR_LIFETIME` + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): AggregateObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala index b803ad54a1c..4a50d7767e1 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala @@ -16,6 +16,8 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } + import java.util.concurrent.TimeUnit import com.mongodb.reactivestreams.client.DistinctPublisher import org.mongodb.scala.bson.BsonValue @@ -109,6 +111,22 @@ case class DistinctObservable[TResult](private val wrapped: DistinctPublisher[TR this } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): DistinctObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala index 575ca66e8c8..c7cb7a158ae 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala @@ -16,6 +16,7 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } import com.mongodb.reactivestreams.client.FindPublisher import com.mongodb.{ CursorType, ExplainVerbosity } import org.mongodb.scala.bson.BsonValue @@ -332,6 +333,28 @@ case class FindObservable[TResult](private val wrapped: FindPublisher[TResult]) this } + /** + * Sets the timeoutMode for the cursor.
+ * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * If the `timeout` is set then: + * + * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME` + * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it is an error + * to configure it as `TimeoutMode.CURSOR_LIFETIME` + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): FindObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Explain the execution plan for this operation with the server's default verbosity level * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala index 65b5b61a5d4..c73fbb7118e 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala @@ -16,6 +16,8 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } + import java.util.concurrent.TimeUnit import com.mongodb.reactivestreams.client.ListCollectionsPublisher import org.mongodb.scala.bson.BsonValue @@ -94,6 +96,22 @@ case class ListCollectionsObservable[TResult](wrapped: ListCollectionsPublisher[ this } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListCollectionsObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala index 1d389eb476e..0b5d5bf2f93 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala @@ -16,6 +16,8 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } + import java.util.concurrent.TimeUnit import com.mongodb.reactivestreams.client.ListDatabasesPublisher import org.mongodb.scala.bson.BsonValue @@ -123,6 +125,22 @@ case class ListDatabasesObservable[TResult](wrapped: ListDatabasesPublisher[TRes this } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListDatabasesObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result.
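For context only (not part of the patch): a minimal usage sketch of the new `timeoutMode` setter, assuming a client-level `timeout` has been configured; the connection string, database, and collection names are placeholders.

import java.util.concurrent.TimeUnit
import org.mongodb.scala.{ MongoClient, MongoClientSettings, TimeoutMode }

// A client-level timeout is required before timeoutMode has any effect (placeholder URI).
val settings = MongoClientSettings.builder()
  .applyConnectionString(new com.mongodb.ConnectionString("mongodb://localhost"))
  .timeout(2, TimeUnit.SECONDS)
  .build()
val client = MongoClient(settings)

// Non-tailable cursors default to TimeoutMode.CURSOR_LIFETIME; ITERATION applies the
// timeout to each batch (getMore) rather than to the cursor's whole lifetime.
val docs = client.getDatabase("test")
  .getCollection("coll")
  .find()
  .timeoutMode(TimeoutMode.ITERATION)
  .toFuture()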
* diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala index 8de986edde0..fa8e3d1b24d 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala @@ -16,6 +16,8 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } + import java.util.concurrent.TimeUnit import com.mongodb.reactivestreams.client.ListIndexesPublisher import org.mongodb.scala.bson.BsonValue @@ -81,6 +83,22 @@ case class ListIndexesObservable[TResult](wrapped: ListIndexesPublisher[TResult] this } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListIndexesObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala index 16b471a21e3..3987e830732 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala @@ -17,6 +17,7 @@ package org.mongodb.scala import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.{ Alpha, Reason } import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher import org.mongodb.scala.bson.BsonValue import org.mongodb.scala.bson.DefaultHelper.DefaultsTo @@ -122,6 +123,28 @@ case class ListSearchIndexesObservable[TResult](wrapped: ListSearchIndexesPublis this } + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * If the `timeout` is set then: + * + * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME` + * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it is an error + * to configure it as `TimeoutMode.CURSOR_LIFETIME` + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result.
* diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala index 9e6ed2b2158..0ccabdaea62 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala @@ -16,8 +16,9 @@ package org.mongodb.scala -import java.util.concurrent.TimeUnit +import com.mongodb.annotations.{ Alpha, Reason } +import java.util.concurrent.TimeUnit import com.mongodb.client.model.MapReduceAction import com.mongodb.reactivestreams.client.MapReducePublisher import org.mongodb.scala.bson.conversions.Bson @@ -221,6 +222,22 @@ case class MapReduceObservable[TResult](wrapped: MapReducePublisher[TResult]) ex */ def toCollection(): SingleObservable[Unit] = wrapped.toCollection() + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): MapReduceObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + /** * Helper to return a single observable limited to the first result. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala index c370077a7d2..c6849c550c1 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala @@ -16,18 +16,13 @@ package org.mongodb.scala -import java.io.Closeable - import com.mongodb.connection.ClusterDescription import com.mongodb.reactivestreams.client.{ MongoClient => JMongoClient, MongoClients } import org.bson.codecs.configuration.CodecRegistries.{ fromProviders, fromRegistries } import org.bson.codecs.configuration.CodecRegistry -import org.mongodb.scala.bson.DefaultHelper.DefaultsTo import org.mongodb.scala.bson.codecs.{ DocumentCodecProvider, IterableCodecProvider } -import org.mongodb.scala.bson.conversions.Bson -import scala.collection.JavaConverters._ -import scala.reflect.ClassTag +import java.io.Closeable /** * Companion object for creating new [[MongoClient]] instances @@ -116,36 +111,7 @@ object MongoClient { * @param wrapped the underlying java MongoClient * @since 1.0 */ -case class MongoClient(private val wrapped: JMongoClient) extends Closeable { - - /** - * Creates a client session. - * - * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations. - * - * @since 2.4 - * @note Requires MongoDB 3.6 or greater - */ - def startSession(): SingleObservable[ClientSession] = wrapped.startSession() - - /** - * Creates a client session. - * - * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations. - * - * @param options the options for the client session - * @since 2.2 - * @note Requires MongoDB 3.6 or greater - */ - def startSession(options: ClientSessionOptions): SingleObservable[ClientSession] = wrapped.startSession(options) - - /** - * Gets the database with the given name. 
- * - * @param name the name of the database - * @return the database - */ - def getDatabase(name: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(name)) +case class MongoClient(private val wrapped: JMongoClient) extends MongoCluster(wrapped) with Closeable { /** * Close the client, which will close all underlying cached resources, including, for example, @@ -153,118 +119,15 @@ case class MongoClient(private val wrapped: JMongoClient) extends Closeable { */ def close(): Unit = wrapped.close() - /** - * Get a list of the database names - * - * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]] - * @return an iterable containing all the names of all the databases - */ - def listDatabaseNames(): Observable[String] = wrapped.listDatabaseNames() - - /** - * Get a list of the database names - * - * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]] - * - * @param clientSession the client session with which to associate this operation - * @return an iterable containing all the names of all the databases - * @since 2.2 - * @note Requires MongoDB 3.6 or greater - */ - def listDatabaseNames(clientSession: ClientSession): Observable[String] = wrapped.listDatabaseNames(clientSession) - - /** - * Gets the list of databases - * - * @tparam TResult the type of the class to use instead of `Document`. - * @return the fluent list databases interface - */ - def listDatabases[TResult]()( - implicit e: TResult DefaultsTo Document, - ct: ClassTag[TResult] - ): ListDatabasesObservable[TResult] = - ListDatabasesObservable(wrapped.listDatabases(ct)) - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @tparam TResult the type of the class to use instead of `Document`. - * @return the fluent list databases interface - * @since 2.2 - * @note Requires MongoDB 3.6 or greater - */ - def listDatabases[TResult]( - clientSession: ClientSession - )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): ListDatabasesObservable[TResult] = - ListDatabasesObservable(wrapped.listDatabases(clientSession, ct)) - - /** - * Creates a change stream for this collection. - * - * @tparam C the target document type of the observable. - * @return the change stream observable - * @since 2.4 - * @note Requires MongoDB 4.0 or greater - */ - def watch[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = - ChangeStreamObservable(wrapped.watch(ct)) - - /** - * Creates a change stream for this collection. - * - * @param pipeline the aggregation pipeline to apply to the change stream - * @tparam C the target document type of the observable. - * @return the change stream observable - * @since 2.4 - * @note Requires MongoDB 4.0 or greater - */ - def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = - ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct)) - - /** - * Creates a change stream for this collection. - * - * @param clientSession the client session with which to associate this operation - * @tparam C the target document type of the observable. 
- * @return the change stream observable - * @since 2.4 - * @note Requires MongoDB 4.0 or greater - */ - def watch[C]( - clientSession: ClientSession - )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = - ChangeStreamObservable(wrapped.watch(clientSession, ct)) - - /** - * Creates a change stream for this collection. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream - * @tparam C the target document type of the observable. - * @return the change stream observable - * @since 2.4 - * @note Requires MongoDB 4.0 or greater - */ - def watch[C]( - clientSession: ClientSession, - pipeline: Seq[Bson] - )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = - ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct)) - /** * Gets the current cluster description. * - *
* <p>
      - * This method will not block, meaning that it may return a { @link ClusterDescription} whose { @code clusterType} is unknown + * This method will not block, meaning that it may return a `ClusterDescription` whose `clusterType` is unknown * and whose { @link com.mongodb.connection.ServerDescription}s are all in the connecting state. If the application requires - * notifications after the driver has connected to a member of the cluster, it should register a { @link ClusterListener} via - * the { @link ClusterSettings} in { @link com.mongodb.MongoClientSettings}. - *
* </p>
      + * notifications after the driver has connected to a member of the cluster, it should register a `ClusterListener` via + * the `ClusterSettings` in `MongoClientSettings`. * * @return the current cluster description - * @see ClusterSettings.Builder#addClusterListener(ClusterListener) - * @see com.mongodb.MongoClientSettings.Builder#applyToClusterSettings(com.mongodb.Block) * @since 4.1 */ def getClusterDescription: ClusterDescription = diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala new file mode 100644 index 00000000000..a7352d5ac41 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala @@ -0,0 +1,293 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } +import com.mongodb.{ ReadConcern, ReadPreference, WriteConcern } +import com.mongodb.reactivestreams.client.{ MongoCluster => JMongoCluster } +import org.bson.codecs.configuration.CodecRegistry +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.bson.conversions.Bson + +import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } +import scala.reflect.ClassTag + +/** + * Companion object for creating new [[MongoCluster]] instances + * + * @since 1.0 + */ +object MongoCluster { + + /** + * Create a new `MongoCluster` wrapper + * + * @param wrapped the java `MongoCluster` instance + * @return MongoCluster + */ + def apply(wrapped: JMongoCluster): MongoCluster = new MongoCluster(wrapped) +} + +/** + * The client-side representation of a MongoDB cluster operations. + * + * The originating [[MongoClient]] is responsible for the closing of resources. + * If the originator [[MongoClient]] is closed, then any operations will fail. + * + * @see MongoClient + * @since 5.2 + */ +class MongoCluster(private val wrapped: JMongoCluster) { + + /** + * Get the codec registry for the MongoDatabase. + * + * @return the { @link org.bson.codecs.configuration.CodecRegistry} + */ + lazy val codecRegistry: CodecRegistry = wrapped.getCodecRegistry + + /** + * Get the read preference for the MongoDatabase. + * + * @return the { @link com.mongodb.ReadPreference} + */ + lazy val readPreference: ReadPreference = wrapped.getReadPreference + + /** + * Get the write concern for the MongoDatabase. + * + * @return the { @link com.mongodb.WriteConcern} + */ + lazy val writeConcern: WriteConcern = wrapped.getWriteConcern + + /** + * Get the read concern for the MongoDatabase. + * + * @return the [[ReadConcern]] + */ + lazy val readConcern: ReadConcern = wrapped.getReadConcern + + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. 
+ * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + */ + @Alpha(Array(Reason.CLIENT)) + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + + /** + * Create a new MongoCluster instance with a different codec registry. + * + * The { @link CodecRegistry} configured by this method is effectively treated by the driver as an + * instance of { @link CodecProvider}, which { @link CodecRegistry} extends. + * So there is no benefit to defining a class that implements { @link CodecRegistry}. Rather, an + * application should always create { @link CodecRegistry} instances using the factory methods in + * { @link CodecRegistries}. + * + * @param codecRegistry the new { @link org.bson.codecs.configuration.CodecRegistry} for the collection + * @return a new MongoCluster instance with the different codec registry + * @see CodecRegistries + */ + def withCodecRegistry(codecRegistry: CodecRegistry): MongoCluster = + MongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param readPreference the new { @link com.mongodb.ReadPreference} for the collection + * @return a new MongoCluster instance with the different readPreference + */ + def withReadPreference(readPreference: ReadPreference): MongoCluster = + MongoCluster(wrapped.withReadPreference(readPreference)) + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param writeConcern the new { @link com.mongodb.WriteConcern} for the collection + * @return a new MongoCluster instance with the different writeConcern + */ + def withWriteConcern(writeConcern: WriteConcern): MongoCluster = + MongoCluster(wrapped.withWriteConcern(writeConcern)) + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param readConcern the new [[ReadConcern]] for the collection + * @return a new MongoCluster instance with the different ReadConcern + * @since 1.1 + */ + def withReadConcern(readConcern: ReadConcern): MongoCluster = + MongoCluster(wrapped.withReadConcern(readConcern)) + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoCluster instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): MongoCluster = + MongoCluster(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + + /** + * Creates a client session. 
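A sketch of the `timeout`/`withTimeout` semantics documented above (not part of the patch): `None` means the deprecated per-phase timeouts still apply, a zero duration means no limit, and a positive duration caps the full execution of each operation on handles derived from the cluster.

    import org.mongodb.scala._
    import scala.concurrent.duration._

    val client = MongoClient("mongodb://localhost")

    // Derive a handle whose operations share a 2 second execution budget.
    val timeLimited: MongoCluster = client.withTimeout(2.seconds)

    timeLimited.timeout match {
      case Some(t) if t.length == 0 => println("timeoutMS=0: no limit")
      case Some(t)                  => println(s"operations are capped at $t")
      case None                     => println("timeoutMS unset: deprecated per-phase timeouts apply")
    }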
+ * + * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations. + * + * @since 2.4 + * @note Requires MongoDB 3.6 or greater + */ + def startSession(): SingleObservable[ClientSession] = wrapped.startSession() + + /** + * Creates a client session. + * + * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations. + * + * @param options the options for the client session + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def startSession(options: ClientSessionOptions): SingleObservable[ClientSession] = wrapped.startSession(options) + + /** + * Gets the database with the given name. + * + * @param name the name of the database + * @return the database + */ + def getDatabase(name: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(name)) + + /** + * Get a list of the database names + * + * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]] + * @return an iterable containing all the names of all the databases + */ + def listDatabaseNames(): Observable[String] = wrapped.listDatabaseNames() + + /** + * Get a list of the database names + * + * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]] + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the databases + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def listDatabaseNames(clientSession: ClientSession): Observable[String] = wrapped.listDatabaseNames(clientSession) + + /** + * Gets the list of databases + * + * @tparam TResult the type of the class to use instead of `Document`. + * @return the fluent list databases interface + */ + def listDatabases[TResult]()( + implicit e: TResult DefaultsTo Document, + ct: ClassTag[TResult] + ): ListDatabasesObservable[TResult] = + ListDatabasesObservable(wrapped.listDatabases(ct)) + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @tparam TResult the type of the class to use instead of `Document`. + * @return the fluent list databases interface + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def listDatabases[TResult]( + clientSession: ClientSession + )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): ListDatabasesObservable[TResult] = + ListDatabasesObservable(wrapped.listDatabases(clientSession, ct)) + + /** + * Creates a change stream for this collection. + * + * @tparam C the target document type of the observable. + * @return the change stream observable + * @since 2.4 + * @note Requires MongoDB 4.0 or greater + */ + def watch[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = + ChangeStreamObservable(wrapped.watch(ct)) + + /** + * Creates a change stream for this collection. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @tparam C the target document type of the observable. + * @return the change stream observable + * @since 2.4 + * @note Requires MongoDB 4.0 or greater + */ + def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = + ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct)) + + /** + * Creates a change stream for this collection. 
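Because `MongoClient` now extends this class, existing call sites keep compiling unchanged, and new code can accept the base type instead. A minimal sketch (the `ping` helper is illustrative, not part of the patch):

    import org.mongodb.scala._

    val client: MongoClient = MongoClient("mongodb://localhost")

    // These members are now inherited from MongoCluster rather than declared on MongoClient.
    val session = client.startSession()
    val names   = client.listDatabaseNames()

    // New code can be written against the base type and work with any derived handle.
    def ping(cluster: MongoCluster): SingleObservable[Document] =
      cluster.getDatabase("admin").runCommand(Document("ping" -> 1))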
+ * + * @param clientSession the client session with which to associate this operation + * @tparam C the target document type of the observable. + * @return the change stream observable + * @since 2.4 + * @note Requires MongoDB 4.0 or greater + */ + def watch[C]( + clientSession: ClientSession + )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = + ChangeStreamObservable(wrapped.watch(clientSession, ct)) + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @tparam C the target document type of the observable. + * @return the change stream observable + * @since 2.4 + * @note Requires MongoDB 4.0 or greater + */ + def watch[C]( + clientSession: ClientSession, + pipeline: Seq[Bson] + )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = + ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct)) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala index e2682e0130d..bdd63f9245a 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala @@ -16,6 +16,7 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } import com.mongodb.client.model.DropCollectionOptions import java.util @@ -27,6 +28,7 @@ import org.mongodb.scala.model._ import org.mongodb.scala.result._ import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS, TimeUnit } import scala.reflect.ClassTag // scalastyle:off number.of.methods file.size.limit @@ -83,6 +85,29 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul */ lazy val readConcern: ReadConcern = wrapped.getReadConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + /** * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. 
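A sketch of the collection-level `timeout` property above, together with the matching `withTimeout` setter added just below; the 500 millisecond budget is illustrative and not part of the patch.

    import org.mongodb.scala._
    import scala.concurrent.duration._

    // Override whatever timeout the collection inherited from its database or client.
    def withTightBudget(collection: MongoCollection[Document]): MongoCollection[Document] =
      collection.withTimeout(500.milliseconds)

    // Inspect the effective setting.
    def describeBudget(collection: MongoCollection[Document]): String =
      collection.timeout.map(t => s"capped at $t").getOrElse("legacy timeouts apply")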
* @@ -136,6 +161,20 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul def withReadConcern(readConcern: ReadConcern): MongoCollection[TResult] = MongoCollection(wrapped.withReadConcern(readConcern)) + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoCollection instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): MongoCollection[TResult] = + MongoCollection(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + /** * Gets an estimate of the count of documents in a collection using collection metadata. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala index 33ad891373c..54c48574c72 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala @@ -16,6 +16,7 @@ package org.mongodb.scala +import com.mongodb.annotations.{ Alpha, Reason } import com.mongodb.client.model.{ CreateCollectionOptions, CreateViewOptions } import com.mongodb.reactivestreams.client.{ MongoDatabase => JMongoDatabase } import org.bson.codecs.configuration.CodecRegistry @@ -23,6 +24,7 @@ import org.mongodb.scala.bson.DefaultHelper.DefaultsTo import org.mongodb.scala.bson.conversions.Bson import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } import scala.reflect.ClassTag /** @@ -69,6 +71,29 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { */ lazy val readConcern: ReadConcern = wrapped.getReadConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -113,6 +138,20 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { def withReadConcern(readConcern: ReadConcern): MongoDatabase = MongoDatabase(wrapped.withReadConcern(readConcern)) + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. 
+ * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoDatabase instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): MongoDatabase = + MongoDatabase(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + /** * Gets a collection, with a specific default document class. * @@ -128,6 +167,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { /** * Executes command in the context of the current database using the primary server. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the + * `timeoutMS` setting has been set. + * * @param command the command to be run * @tparam TResult the type of the class to use instead of [[Document]]. * @return a Observable containing the command result @@ -140,6 +182,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { /** * Executes command in the context of the current database. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the + * `timeoutMS` setting has been set. + * * @param command the command to be run * @param readPreference the [[ReadPreference]] to be used when executing the command * @tparam TResult the type of the class to use instead of [[Document]]. @@ -154,6 +199,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { /** * Executes command in the context of the current database using the primary server. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the + * `timeoutMS` setting has been set. + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @tparam TResult the type of the class to use instead of [[Document]]. @@ -170,6 +218,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { /** * Executes command in the context of the current database. * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the + * `timeoutMS` setting has been set. + * * @param command the command to be run * @param readPreference the [[ReadPreference]] to be used when executing the command * @tparam TResult the type of the class to use instead of [[Document]]. diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala index 88400883009..b828fe6074f 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala @@ -16,8 +16,8 @@ package org.mongodb.scala.gridfs +import com.mongodb.annotations.{ Alpha, Reason } import java.nio.ByteBuffer - import com.mongodb.reactivestreams.client.gridfs.{ GridFSBucket => JGridFSBucket, GridFSBuckets } import org.mongodb.scala.bson.conversions.Bson import org.mongodb.scala.bson.{ BsonObjectId, BsonValue, ObjectId } @@ -31,6 +31,8 @@ import org.mongodb.scala.{ WriteConcern } +import scala.concurrent.duration.{ Duration, MILLISECONDS } + /** * A factory for GridFSBucket instances. 
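A sketch of the `runCommand` guidance above (not part of the patch): when `timeoutMS` is in play, bound the command through `withTimeout` rather than embedding a `maxTimeMS` field in the command document.

    import org.mongodb.scala._
    import scala.concurrent.duration._

    // The command document carries no maxTimeMS; the 3 second budget comes from timeoutMS.
    def boundedStats(database: MongoDatabase): SingleObservable[Document] =
      database
        .withTimeout(3.seconds)
        .runCommand(Document("dbStats" -> 1))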
* @@ -102,6 +104,29 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { */ lazy val readConcern: ReadConcern = wrapped.getReadConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + /** * Create a new GridFSBucket instance with a new chunk size in bytes. * @@ -137,12 +162,29 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { */ def withReadConcern(readConcern: ReadConcern): GridFSBucket = GridFSBucket(wrapped.withReadConcern(readConcern)) + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new GridFSBucket instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): GridFSBucket = + GridFSBucket(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + /** * Uploads the contents of the given `Observable` to a GridFS bucket. * * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param filename the filename for the stream * @param source the Publisher providing the file data @@ -158,6 +200,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. 
* + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param filename the filename for the stream * @param source the Publisher providing the file data @@ -178,6 +223,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param id the custom id value of the file * @param filename the filename for the stream @@ -198,6 +246,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param id the custom id value of the file * @param filename the filename for the stream @@ -220,6 +271,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param clientSession the client session with which to associate this operation * @param filename the filename for the stream @@ -241,6 +295,10 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. 
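A sketch of the upload note above (not part of the patch): the bucket inherits the database timeout, and a wider budget can be set per bucket because the source `Observable` itself is not bounded by a read timeout. The bucket name and 60 second value are illustrative.

    import org.mongodb.scala._
    import org.mongodb.scala.gridfs.GridFSBucket
    import scala.concurrent.duration._

    // Give uploads a more generous budget than ordinary operations on the database.
    def bucketForUploads(database: MongoDatabase): GridFSBucket =
      GridFSBucket(database, "fs").withTimeout(60.seconds)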
+ * * @param clientSession the client session with which to associate this operation * @param filename the filename for the stream * @param source the Publisher providing the file data @@ -263,6 +321,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param clientSession the client session with which to associate this operation * @param id the custom id value of the file @@ -286,6 +347,10 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * * @param clientSession the client session with which to associate this operation * @param id the custom id value of the file * @param filename the filename for the stream diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala index 79d0a4a17b1..fdbea9add70 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala @@ -17,10 +17,9 @@ package org.mongodb.scala.gridfs import java.util.concurrent.TimeUnit - import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher import org.mongodb.scala.bson.conversions.Bson -import org.mongodb.scala.{ Observable, Observer, SingleObservable } +import org.mongodb.scala.{ Observable, Observer, SingleObservable, TimeoutMode } import scala.concurrent.duration.Duration diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala index fc3196f76f6..0fff8c4c8ba 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala @@ -16,7 +16,7 @@ package org.mongodb.scala.model -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.fill.FillOutputField import com.mongodb.client.model.search.FieldSearchPath @@ -737,7 +737,7 @@ object Aggregates { * @note Requires MongoDB 6.0.10 or greater * @since 4.11 */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) def vectorSearch( path: FieldSearchPath, queryVector: Iterable[java.lang.Double], @@ -763,7 +763,7 @@ object Aggregates { * @note Requires MongoDB 6.0.10 or greater * @since 4.11 */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) 
def vectorSearch( path: FieldSearchPath, queryVector: Iterable[java.lang.Double], diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala index 4688fa818c6..5ccbd299edf 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.{ MongoTimeUnit => JMongoTimeUnit, Windows => JWindows } import org.bson.types.Decimal128 import org.mongodb.scala.bson.conversions.Bson @@ -56,7 +56,7 @@ import org.mongodb.scala.bson.conversions.Bson * @since 4.3 * @note Requires MongoDB 5.0 or greater. */ -@Beta(Array(Beta.Reason.SERVER)) +@Beta(Array(Reason.SERVER)) object Windows { /** @@ -248,7 +248,7 @@ object Windows { * @since 4.3 * @note Requires MongoDB 5.0 or greater. */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) object Bound { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala index a8dc63a2b29..111af0e6568 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala @@ -16,7 +16,7 @@ package org.mongodb.scala -import com.mongodb.annotations.{ Beta, Sealed } +import com.mongodb.annotations.{ Beta, Reason, Sealed } import scala.collection.JavaConverters._ import com.mongodb.client.model.{ GeoNearOptions, MongoTimeUnit => JMongoTimeUnit, WindowOutputField } @@ -173,7 +173,7 @@ package object model { * * @since 4.9 */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) type CreateEncryptedCollectionParams = com.mongodb.client.model.CreateEncryptedCollectionParams /** @@ -181,7 +181,7 @@ package object model { * * @since 4.9 */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) object CreateEncryptedCollectionParams { def apply(kmsProvider: String) = new com.mongodb.client.model.CreateEncryptedCollectionParams(kmsProvider) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala index afeb5d195d8..d106d6bbd9d 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ FuzzySearchOptions => JFuzzySearchOptions } /** @@ -25,7 +25,7 @@ import com.mongodb.client.model.search.{ FuzzySearchOptions => JFuzzySearchOptio * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object FuzzySearchOptions { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala index d4fe9ccdffc..a651e502b10 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala @@ -15,7 +15,7 
@@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchCollector => JSearchCollector } import org.mongodb.scala.bson.conversions.Bson import org.mongodb.scala.model.Projections @@ -30,7 +30,7 @@ import scala.collection.JavaConverters._ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchCollector { /** @@ -42,7 +42,7 @@ object SearchCollector { * @return The requested `SearchCollector`. * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/ facet collector]] */ - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) def facet(operator: SearchOperator, facets: Iterable[_ <: SearchFacet]): FacetSearchCollector = JSearchCollector.facet(operator, facets.asJava) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala index 0df9a08ac51..ecba0ecce0d 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchCount => JSearchCount } import org.mongodb.scala.bson.conversions.Bson import org.mongodb.scala.model.Projections @@ -28,7 +28,7 @@ import org.mongodb.scala.model.Projections * @see [[https://www.mongodb.com/docs/atlas/atlas-search/counting/ Counting]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) +@Beta(Array(Reason.CLIENT, Reason.SERVER)) object SearchCount { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala index 4482c8bc678..3bc27520ea3 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchFacet => JSearchFacet } import org.mongodb.scala.bson.conversions.Bson @@ -28,7 +28,7 @@ import collection.JavaConverters._ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#facet-definition Facet definition]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) +@Beta(Array(Reason.CLIENT, Reason.SERVER)) object SearchFacet { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala index a46903a3147..7ac1deebac1 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchHighlight => JSearchHighlight } import org.mongodb.scala.bson.conversions.Bson import 
org.mongodb.scala.model.Projections @@ -30,7 +30,7 @@ import collection.JavaConverters._ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchHighlight { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala index a1dc4caebff..90f27092ebc 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchOperator => JSearchOperator } import org.mongodb.scala.bson.conversions.Bson import org.mongodb.scala.model.geojson.Point @@ -29,7 +29,7 @@ import collection.JavaConverters._ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchOperator { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala index 56069e8624d..5eb61591043 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchOptions => JSearchOptions } /** @@ -24,7 +24,7 @@ import com.mongodb.client.model.search.{ SearchOptions => JSearchOptions } * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search syntax]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchOptions { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala index cfe85faa6f7..74999deef35 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchPath => JSearchPath } /** @@ -27,7 +27,7 @@ import com.mongodb.client.model.search.{ SearchPath => JSearchPath } * @see [[https://www.mongodb.com/docs/atlas/atlas-search/path-construction/ Path]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchPath { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala index b43598220e3..35005c05970 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import 
com.mongodb.client.model.search.{ SearchScore => JSearchScore } import org.mongodb.scala.bson.conversions.Bson import org.mongodb.scala.model.Projections @@ -28,7 +28,7 @@ import org.mongodb.scala.model.Projections * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchScore { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala index 22657bc874e..244c07e5847 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ SearchScoreExpression => JSearchScoreExpression } import org.mongodb.scala.bson.conversions.Bson @@ -26,7 +26,7 @@ import collection.JavaConverters._ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#expressions Expressions for the function score modifier]] * @since 4.7 */ -@Beta(Array(Beta.Reason.CLIENT)) +@Beta(Array(Reason.CLIENT)) object SearchScoreExpression { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala index e355a5558cc..ab25650ca7a 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model.search -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOptions } /** @@ -25,7 +25,7 @@ import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOpt * @note Requires MongoDB 6.0.10 or greater * @since 4.11 */ -@Beta(Array(Beta.Reason.SERVER)) +@Beta(Array(Reason.SERVER)) object VectorSearchOptions { /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala index e3f3fb5e308..fb9e393dd1b 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala @@ -15,7 +15,7 @@ */ package org.mongodb.scala.model -import com.mongodb.annotations.{ Beta, Sealed } +import com.mongodb.annotations.{ Beta, Reason, Sealed } /** * Query building API for MongoDB Atlas full-text search. 
@@ -40,7 +40,7 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchOperator = com.mongodb.client.model.search.SearchOperator /** @@ -50,14 +50,14 @@ package object search { * @see `SearchOperator.compound()` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type CompoundSearchOperatorBase = com.mongodb.client.model.search.CompoundSearchOperatorBase /** * @see `SearchOperator.compound()` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type CompoundSearchOperator = com.mongodb.client.model.search.CompoundSearchOperator /** @@ -68,7 +68,7 @@ package object search { * @see `CompoundSearchOperatorBase.must(Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type MustCompoundSearchOperator = com.mongodb.client.model.search.MustCompoundSearchOperator /** @@ -79,7 +79,7 @@ package object search { * @see `CompoundSearchOperatorBase.mustNot(Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type MustNotCompoundSearchOperator = com.mongodb.client.model.search.MustNotCompoundSearchOperator /** @@ -90,7 +90,7 @@ package object search { * @see `CompoundSearchOperatorBase.should(Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type ShouldCompoundSearchOperator = com.mongodb.client.model.search.ShouldCompoundSearchOperator /** @@ -101,14 +101,14 @@ package object search { * @see `CompoundSearchOperatorBase.filter(Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type FilterCompoundSearchOperator = com.mongodb.client.model.search.FilterCompoundSearchOperator /** * @see `SearchOperator.exists(FieldSearchPath)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type ExistsSearchOperator = com.mongodb.client.model.search.ExistsSearchOperator /** @@ -116,7 +116,7 @@ package object search { * @see `SearchOperator.text(Iterable, Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type TextSearchOperator = com.mongodb.client.model.search.TextSearchOperator /** @@ -124,7 +124,7 @@ package object search { * @see `SearchOperator.autocomplete(Iterable, FieldSearchPath)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type AutocompleteSearchOperator = com.mongodb.client.model.search.AutocompleteSearchOperator /** @@ -134,7 +134,7 @@ package object search { * @see `SearchOperator.numberRange` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type NumberRangeSearchOperatorBase = com.mongodb.client.model.search.NumberRangeSearchOperatorBase /** @@ -144,42 +144,42 @@ package object search { * @see `SearchOperator.dateRange` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type DateRangeSearchOperatorBase = com.mongodb.client.model.search.DateRangeSearchOperatorBase /** * @see `SearchOperator.numberRange` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type NumberRangeSearchOperator = com.mongodb.client.model.search.NumberRangeSearchOperator /** * @see `SearchOperator.dateRange` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type DateRangeSearchOperator = com.mongodb.client.model.search.DateRangeSearchOperator /** * @see `SearchOperator.near` */ @Sealed - 
@Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type NumberNearSearchOperator = com.mongodb.client.model.search.NumberNearSearchOperator /** * @see `SearchOperator.near` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type DateNearSearchOperator = com.mongodb.client.model.search.DateNearSearchOperator /** * @see `SearchOperator.near` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type GeoNearSearchOperator = com.mongodb.client.model.search.GeoNearSearchOperator /** @@ -189,7 +189,7 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type FuzzySearchOptions = com.mongodb.client.model.search.FuzzySearchOptions /** @@ -200,14 +200,14 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchCollector = com.mongodb.client.model.search.SearchCollector /** * @see `SearchCollector.facet(SearchOperator, Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type FacetSearchCollector = com.mongodb.client.model.search.FacetSearchCollector /** @@ -216,7 +216,7 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search syntax]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchOptions = com.mongodb.client.model.search.SearchOptions /** @@ -227,7 +227,7 @@ package object search { * @since 4.11 */ @Sealed - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) type VectorSearchOptions = com.mongodb.client.model.search.VectorSearchOptions /** @@ -238,7 +238,7 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchHighlight = com.mongodb.client.model.search.SearchHighlight /** @@ -250,21 +250,21 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/counting/ Counting]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type SearchCount = com.mongodb.client.model.search.SearchCount /** * @see `SearchCount.total()` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type TotalSearchCount = com.mongodb.client.model.search.TotalSearchCount /** * @see `SearchCount.lowerBound()` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type LowerBoundSearchCount = com.mongodb.client.model.search.LowerBoundSearchCount /** @@ -273,28 +273,28 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#facet-definition Facet definition]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type SearchFacet = com.mongodb.client.model.search.SearchFacet /** * @see `SearchFacet.stringFacet(String, FieldSearchPath)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type StringSearchFacet = com.mongodb.client.model.search.StringSearchFacet /** * @see `SearchFacet.numberFacet(String, FieldSearchPath, 
Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type NumberSearchFacet = com.mongodb.client.model.search.NumberSearchFacet /** * @see `SearchFacet.dateFacet(String, FieldSearchPath, Iterable)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER)) + @Beta(Array(Reason.CLIENT, Reason.SERVER)) type DateSearchFacet = com.mongodb.client.model.search.DateSearchFacet /** @@ -306,21 +306,21 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/path-construction/ Path]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchPath = com.mongodb.client.model.search.SearchPath /** * @see `SearchPath.fieldPath(String)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type FieldSearchPath = com.mongodb.client.model.search.FieldSearchPath /** * @see `SearchPath.wildcardPath(String)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type WildcardSearchPath = com.mongodb.client.model.search.WildcardSearchPath /** @@ -331,35 +331,35 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchScore = com.mongodb.client.model.search.SearchScore /** * @see `SearchScore.boost(float)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type ValueBoostSearchScore = com.mongodb.client.model.search.ValueBoostSearchScore /** * @see `SearchScore.boost(FieldSearchPath)` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type PathBoostSearchScore = com.mongodb.client.model.search.PathBoostSearchScore /** * @see `SearchScore.constant` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type ConstantSearchScore = com.mongodb.client.model.search.ConstantSearchScore /** * @see `SearchScore.function` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type FunctionSearchScore = com.mongodb.client.model.search.FunctionSearchScore /** @@ -367,62 +367,62 @@ package object search { * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#expressions Expressions for the function score modifier]] */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type SearchScoreExpression = com.mongodb.client.model.search.SearchScoreExpression /** * @see `SearchScoreExpression.relevanceExpression` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type RelevanceSearchScoreExpression = com.mongodb.client.model.search.RelevanceSearchScoreExpression /** * @see `SearchScoreExpression.pathExpression` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type PathSearchScoreExpression = com.mongodb.client.model.search.PathSearchScoreExpression /** * @see `SearchScoreExpression.constantExpression` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type ConstantSearchScoreExpression = com.mongodb.client.model.search.ConstantSearchScoreExpression /** * @see `SearchScoreExpression.gaussExpression` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type GaussSearchScoreExpression = com.mongodb.client.model.search.GaussSearchScoreExpression /** * @see `SearchScoreExpression.log` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type LogSearchScoreExpression = 
com.mongodb.client.model.search.LogSearchScoreExpression /** * @see `SearchScoreExpression.log1p` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type Log1pSearchScoreExpression = com.mongodb.client.model.search.Log1pSearchScoreExpression /** * @see `SearchScoreExpression.addExpression` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type AddSearchScoreExpression = com.mongodb.client.model.search.AddSearchScoreExpression /** * @see `SearchScoreExpression.multiplyExpression` */ @Sealed - @Beta(Array(Beta.Reason.CLIENT)) + @Beta(Array(Reason.CLIENT)) type MultiplySearchScoreExpression = com.mongodb.client.model.search.MultiplySearchScoreExpression } diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala index bf1f7b1ae5b..f57ddce32c6 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala @@ -16,7 +16,7 @@ package org.mongodb.scala.model -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.vault.{ DataKeyOptions => JDataKeyOptions } import com.mongodb.client.model.vault.{ EncryptOptions => JEncryptOptions } import com.mongodb.client.model.vault.{ RangeOptions => JRangeOptions } @@ -60,7 +60,7 @@ package object vault { * * @since 4.9 */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) type RangeOptions = JRangeOptions object RangeOptions { diff --git a/driver-scala/src/main/scala/org/mongodb/scala/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/package.scala index b52ff13fd61..7da5578ff96 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/package.scala @@ -16,7 +16,7 @@ package org.mongodb -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import org.bson.BsonDocumentReader import org.bson.codecs.{ DecoderContext, DocumentCodec } import org.mongodb.scala.bson.BsonDocument @@ -108,6 +108,16 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit */ type TagSet = com.mongodb.TagSet + /** + * The timeout mode for a cursor + * + * For operations that create cursors, `timeoutMS` can either cap the lifetime of the cursor or be applied separately to the + * original operation and all next calls. + * + * @since 5.2 + */ + type TimeoutMode = com.mongodb.client.cursor.TimeoutMode + /** * Controls the acknowledgment of write operations with various options. */ @@ -323,6 +333,11 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit */ type MongoSocketReadTimeoutException = com.mongodb.MongoSocketReadTimeoutException + /** + * This exception is thrown when there is a timeout writing to a socket. + */ + type MongoSocketWriteTimeoutException = com.mongodb.MongoSocketWriteTimeoutException + /** * This exception is thrown when there is an exception writing a response to a Socket. */ @@ -333,6 +348,19 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit */ type MongoTimeoutException = com.mongodb.MongoTimeoutException + /** + * Exception thrown to indicate that a MongoDB operation has exceeded the specified timeout for + * the full execution of operation. + * + *
<p>
      The [[MongoOperationTimeoutException]] might provide information about the underlying + * cause of the timeout, if available. For example, if retries are attempted due to transient failures, + * and a timeout occurs in any of the attempts, the exception from one of the retries may be appended + * as the cause to this [[MongoOperationTimeoutException]]. + + @since 5.0 + */ + type MongoOperationTimeoutException = com.mongodb.MongoOperationTimeoutException + /** * An exception indicating a failure to apply the write concern to the requested write operation * @@ -367,7 +395,7 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit * * @since 4.9 */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) type MongoUpdatedEncryptedFieldsException = com.mongodb.MongoUpdatedEncryptedFieldsException /** diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala index b4c9de4d440..3d375b56e21 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala @@ -16,7 +16,7 @@ package org.mongodb.scala.vault -import com.mongodb.annotations.Beta +import com.mongodb.annotations.{ Beta, Reason } import com.mongodb.client.model.{ CreateCollectionOptions, CreateEncryptedCollectionParams } import java.io.Closeable @@ -91,7 +91,7 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos * @return a Publisher containing the queryable encrypted range expression * @since 4.9 */ - @Beta(Array(Beta.Reason.SERVER)) def encryptExpression( + @Beta(Array(Reason.SERVER)) def encryptExpression( expression: Document, options: EncryptOptions ): SingleObservable[Document] = @@ -126,7 +126,7 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos * @note Requires MongoDB 7.0 or greater. 
* @see [[https://www.mongodb.com/docs/manual/reference/command/create/ Create Command]] */ - @Beta(Array(Beta.Reason.SERVER)) + @Beta(Array(Reason.SERVER)) def createEncryptedCollection( database: MongoDatabase, collectionName: String, diff --git a/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala index d18004e5aa5..b0edcb68b8e 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala @@ -17,13 +17,13 @@ package org.mongodb.scala import com.mongodb.ExplainVerbosity - -import java.util.concurrent.TimeUnit +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.AggregatePublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.mongodb.scala.model.Collation import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration class AggregateObservableSpec extends BaseSpec with MockitoSugar { @@ -59,6 +59,7 @@ class AggregateObservableSpec extends BaseSpec with MockitoSugar { observable.batchSize(batchSize) observable.explain[Document]() observable.explain[Document](verbosity) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).allowDiskUse(true) verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) @@ -70,6 +71,7 @@ class AggregateObservableSpec extends BaseSpec with MockitoSugar { verify(wrapper).batchSize(batchSize) verify(wrapper).explain(ct) verify(wrapper).explain(ct, verbosity) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) observable.toCollection() verify(wrapper).toCollection diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala index 9d1a86ee75a..b22d0d8373d 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala @@ -87,6 +87,7 @@ class ApiAliasAndCompanionSpec extends BaseSpec { "AggregatePrimer", "RemovePrimer", "SyncMongoClient", + "SyncMongoCluster", "SyncGridFSBucket", "SyncMongoDatabase", "SyncClientEncryption" @@ -104,7 +105,8 @@ class ApiAliasAndCompanionSpec extends BaseSpec { "package", "ReadConcernLevel", "SingleObservable", - "Subscription" + "Subscription", + "TimeoutMode" ) val classFilter = (f: Class[_ <: Object]) => { diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala index ea5a3eb5543..03c745d0ae6 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala @@ -16,8 +16,7 @@ package org.mongodb.scala -import java.util.concurrent.TimeUnit - +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ChangeStreamPublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.mongodb.scala.bson.BsonTimestamp @@ -26,6 +25,7 @@ import org.mongodb.scala.model.changestream.FullDocument import org.reactivestreams.Publisher import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration import scala.util.Success diff --git 
a/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala index e609f8ccdc8..e55455579b4 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala @@ -15,16 +15,15 @@ */ package org.mongodb.scala -import java.util.concurrent.TimeUnit - +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.DistinctPublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.mongodb.scala.model.Collation import org.reactivestreams.Publisher import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration - class DistinctObservableSpec extends BaseSpec with MockitoSugar { "DistinctObservable" should "have the same methods as the wrapped DistinctObservable" in { @@ -51,11 +50,14 @@ class DistinctObservableSpec extends BaseSpec with MockitoSugar { observable.maxTime(duration) observable.collation(collation) observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).filter(filter) verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) verify(wrapper).collation(collation) verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + verifyNoMoreInteractions(wrapper) } } diff --git a/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala index 1af77eeb6e7..eaf117a1348 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala @@ -16,6 +16,7 @@ package org.mongodb.scala +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.FindPublisher import com.mongodb.{ CursorType, ExplainVerbosity } import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } @@ -75,6 +76,7 @@ class FindObservableSpec extends BaseSpec with MockitoSugar { observable.allowDiskUse(true) observable.explain[Document]() observable.explain[Document](verbosity) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).collation(collation) verify(wrapper).cursorType(CursorType.NonTailable) @@ -93,6 +95,8 @@ class FindObservableSpec extends BaseSpec with MockitoSugar { verify(wrapper).allowDiskUse(true) verify(wrapper).explain(ct) verify(wrapper).explain(ct, verbosity) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + verifyNoMoreInteractions(wrapper) } } diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala index 60ebad3c597..20990f68b58 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala @@ -16,13 +16,13 @@ package org.mongodb.scala -import java.util.concurrent.TimeUnit - +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListCollectionsPublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.reactivestreams.Publisher import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration class ListCollectionsObservableSpec extends BaseSpec with 
MockitoSugar { @@ -49,10 +49,13 @@ class ListCollectionsObservableSpec extends BaseSpec with MockitoSugar { observable.filter(filter) observable.maxTime(duration) observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).filter(filter) verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + verifyNoMoreInteractions(wrapper) } } diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala index a0d36fac78d..a80b421af85 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala @@ -15,13 +15,13 @@ */ package org.mongodb.scala -import java.util.concurrent.TimeUnit - +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListDatabasesPublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.reactivestreams.Publisher import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration class ListDatabasesObservableSpec extends BaseSpec with MockitoSugar { @@ -48,11 +48,13 @@ class ListDatabasesObservableSpec extends BaseSpec with MockitoSugar { observable.filter(filter) observable.nameOnly(true) observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) verify(wrapper).filter(filter) verify(wrapper).nameOnly(true) verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) verifyNoMoreInteractions(wrapper) } diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala index 29d7fbe670d..da841fe6656 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala @@ -15,13 +15,13 @@ */ package org.mongodb.scala -import java.util.concurrent.TimeUnit - +import com.mongodb.client.cursor.TimeoutMode import com.mongodb.reactivestreams.client.ListIndexesPublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.reactivestreams.Publisher import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration class ListIndexesObservableSpec extends BaseSpec with MockitoSugar { @@ -45,9 +45,12 @@ class ListIndexesObservableSpec extends BaseSpec with MockitoSugar { observable.maxTime(duration) observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + verifyNoMoreInteractions(wrapper) } } diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala index 1b8d164bd21..af08a0f0452 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala @@ -16,14 +16,14 @@ package org.mongodb.scala -import java.util.concurrent.TimeUnit - 
+import com.mongodb.client.cursor.TimeoutMode import com.mongodb.client.model.MapReduceAction import com.mongodb.reactivestreams.client.MapReducePublisher import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } import org.mongodb.scala.model.Collation import org.scalatestplus.mockito.MockitoSugar +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration class MapReduceObservableSpec extends BaseSpec with MockitoSugar { @@ -63,6 +63,7 @@ class MapReduceObservableSpec extends BaseSpec with MockitoSugar { observable.bypassDocumentValidation(true) observable.collation(collation) observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) verify(wrapper).filter(filter) verify(wrapper).scope(scope) @@ -78,6 +79,8 @@ class MapReduceObservableSpec extends BaseSpec with MockitoSugar { verify(wrapper).bypassDocumentValidation(true) verify(wrapper).collation(collation) verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + verifyNoMoreInteractions(wrapper) observable.toCollection() verify(wrapper).toCollection diff --git a/driver-sync/src/main/com/mongodb/client/AggregateIterable.java b/driver-sync/src/main/com/mongodb/client/AggregateIterable.java index 83e232fecc4..5f7a0dc2aff 100644 --- a/driver-sync/src/main/com/mongodb/client/AggregateIterable.java +++ b/driver-sync/src/main/com/mongodb/client/AggregateIterable.java @@ -17,6 +17,9 @@ package com.mongodb.client; import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -62,6 +65,31 @@ public interface AggregateIterable extends MongoIterable { */ AggregateIterable batchSize(int batchSize); + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p> + * <p> + * If the {@code timeout} is set then: + * <ul> + *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li> + *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error + *  to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li> + * </ul> + * <p> + * Will error if the timeoutMode is set to {@link TimeoutMode#ITERATION} and the pipeline contains either + * an {@code $out} or a {@code $merge} stage. + * </p>
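
For orientation, a minimal sketch of how this setter is meant to be combined with the new timeout configuration added in this series; the connection string, database, collection, pipeline, and the two-second value are illustrative assumptions, not part of the patch itself:

    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoClients;
    import com.mongodb.client.MongoCollection;
    import com.mongodb.client.MongoCursor;
    import com.mongodb.client.cursor.TimeoutMode;
    import com.mongodb.client.model.Aggregates;
    import com.mongodb.client.model.Filters;
    import org.bson.Document;

    import java.util.concurrent.TimeUnit;

    import static java.util.Collections.singletonList;

    public final class AggregateTimeoutModeSketch {
        public static void main(final String[] args) {
            try (MongoClient client = MongoClients.create("mongodb://localhost")) {
                // timeoutMS must be configured (here at the collection level) before timeoutMode may be used
                MongoCollection<Document> orders = client.getDatabase("test")
                        .getCollection("orders")
                        .withTimeout(2, TimeUnit.SECONDS);

                // ITERATION re-applies the timeout to the initial aggregate command and to each getMore;
                // the default for this non-tailable cursor would be CURSOR_LIFETIME (one budget for the whole cursor)
                try (MongoCursor<Document> cursor = orders
                        .aggregate(singletonList(Aggregates.match(Filters.eq("status", "A"))))
                        .timeoutMode(TimeoutMode.ITERATION)
                        .cursor()) {
                    while (cursor.hasNext()) {
                        System.out.println(cursor.next().toJson());
                    }
                }
            }
        }
    }
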
      + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + AggregateIterable timeoutMode(TimeoutMode timeoutMode); + /** * Sets the maximum execution time on the server for this operation. * diff --git a/driver-sync/src/main/com/mongodb/client/DistinctIterable.java b/driver-sync/src/main/com/mongodb/client/DistinctIterable.java index f044a96ab41..9206b7d3094 100644 --- a/driver-sync/src/main/com/mongodb/client/DistinctIterable.java +++ b/driver-sync/src/main/com/mongodb/client/DistinctIterable.java @@ -16,6 +16,9 @@ package com.mongodb.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -88,4 +91,19 @@ public interface DistinctIterable extends MongoIterable { * @mongodb.server.release 4.4 */ DistinctIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p>
      + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + DistinctIterable timeoutMode(TimeoutMode timeoutMode); } diff --git a/driver-sync/src/main/com/mongodb/client/FindIterable.java b/driver-sync/src/main/com/mongodb/client/FindIterable.java index 4cd3c7b7f43..d610ed73ffa 100644 --- a/driver-sync/src/main/com/mongodb/client/FindIterable.java +++ b/driver-sync/src/main/com/mongodb/client/FindIterable.java @@ -18,6 +18,9 @@ import com.mongodb.CursorType; import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.client.model.Projections; import com.mongodb.lang.Nullable; @@ -261,6 +264,28 @@ public interface FindIterable extends MongoIterable { */ FindIterable allowDiskUse(@Nullable Boolean allowDiskUse); + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p> + * <p> + * If the {@code timeout} is set then: + * <ul> + *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li> + *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error + *  to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li> + * </ul>
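
The tailable case above can be sketched as follows, assuming the imports from the earlier aggregate sketch plus com.mongodb.CursorType, and an existing capped-collection handle on which a timeout has already been set; the names are illustrative only:

    static void tailLogs(final MongoCollection<Document> logs) {
        // logs is assumed to be a capped collection with timeoutMS already set, e.g. via logs.withTimeout(...)
        try (MongoCursor<Document> cursor = logs.find()
                .cursorType(CursorType.TailableAwait)
                .timeoutMode(TimeoutMode.ITERATION)   // the only mode permitted for a tailable cursor
                .cursor()) {
            while (cursor.hasNext()) {
                // each getMore runs under a fresh timeoutMS budget rather than one cursor-lifetime budget
                System.out.println(cursor.next().toJson());
            }
        }
    }
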
      + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + FindIterable timeoutMode(TimeoutMode timeoutMode); + /** * Explain the execution plan for this operation with the server's default verbosity level * diff --git a/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java b/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java index 52480103d07..421fbcaa674 100644 --- a/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java +++ b/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java @@ -16,6 +16,9 @@ package com.mongodb.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import org.bson.BsonValue; import org.bson.conversions.Bson; @@ -79,4 +82,18 @@ public interface ListCollectionsIterable extends MongoIterable * @mongodb.server.release 4.4 */ ListCollectionsIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p>
      + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListCollectionsIterable timeoutMode(TimeoutMode timeoutMode); } diff --git a/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java b/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java index 9b344a6ae89..75625e487a0 100644 --- a/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java +++ b/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java @@ -16,6 +16,9 @@ package com.mongodb.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import org.bson.BsonValue; import org.bson.conversions.Bson; @@ -101,4 +104,18 @@ public interface ListDatabasesIterable extends MongoIterable { * @mongodb.server.release 4.4 */ ListDatabasesIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p>
      + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListDatabasesIterable timeoutMode(TimeoutMode timeoutMode); } diff --git a/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java index 2b3de183d64..160cb59ebd9 100644 --- a/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java +++ b/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java @@ -16,6 +16,9 @@ package com.mongodb.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -68,4 +71,18 @@ public interface ListIndexesIterable extends MongoIterable { * @mongodb.server.release 4.4 */ ListIndexesIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p>
      + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListIndexesIterable timeoutMode(TimeoutMode timeoutMode); } diff --git a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java index 1cd61add5a0..2384fcef29d 100644 --- a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java +++ b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java @@ -17,7 +17,10 @@ package com.mongodb.client; import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Evolving; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -98,6 +101,20 @@ public interface ListSearchIndexesIterable extends MongoIterable comment(@Nullable BsonValue comment); + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p>
      + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListSearchIndexesIterable timeoutMode(TimeoutMode timeoutMode); + /** * Explain the execution plan for this operation with the server's default verbosity level. * diff --git a/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java b/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java index 30706dd6373..d406e785da7 100644 --- a/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java +++ b/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java @@ -16,6 +16,9 @@ package com.mongodb.client; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -179,4 +182,18 @@ public interface MapReduceIterable extends MongoIterable { * @mongodb.server.release 3.4 */ MapReduceIterable collation(@Nullable Collation collation); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p> + * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + * </p>
      + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + MapReduceIterable timeoutMode(TimeoutMode timeoutMode); } diff --git a/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java b/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java index 38e33c8ae8e..ed58412496d 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java +++ b/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java @@ -33,6 +33,16 @@ * } * } * + * + *
<p> + * A {@link com.mongodb.MongoOperationTimeoutException} does not invalidate the {@link MongoChangeStreamCursor}, but is immediately + * propagated to the caller. A subsequent method call will attempt to resume the operation by establishing a new change stream on the server, + * without doing a {@code getMore} request first.</p> + * <p>
      + * If a {@link com.mongodb.MongoOperationTimeoutException} occurs before any events are received, it indicates that the server + * has timed out before it could finish processing the existing oplog. In such cases, it is recommended to close the current stream + * and recreate it with a higher timeout setting. + * * @since 3.11 * @param The type of documents the cursor contains */ diff --git a/driver-sync/src/main/com/mongodb/client/MongoClient.java b/driver-sync/src/main/com/mongodb/client/MongoClient.java index c0b0565df81..14519e2413a 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoClient.java +++ b/driver-sync/src/main/com/mongodb/client/MongoClient.java @@ -16,17 +16,12 @@ package com.mongodb.client; -import com.mongodb.ClientSessionOptions; -import com.mongodb.MongoNamespace; import com.mongodb.annotations.Immutable; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.ClusterSettings; import com.mongodb.event.ClusterListener; -import org.bson.Document; -import org.bson.conversions.Bson; import java.io.Closeable; -import java.util.List; /** * A client-side representation of a MongoDB cluster. Instances can represent either a standalone MongoDB instance, a replica set, @@ -42,38 +37,7 @@ * @since 3.7 */ @Immutable -public interface MongoClient extends Closeable { - - /** - * Gets a {@link MongoDatabase} instance for the given database name. - * - * @param databaseName the name of the database to retrieve - * @return a {@code MongoDatabase} representing the specified database - * @throws IllegalArgumentException if databaseName is invalid - * @see MongoNamespace#checkDatabaseNameValidity(String) - */ - MongoDatabase getDatabase(String databaseName); - - /** - * Creates a client session with default options. - * - *
<p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
      - * - * @return the client session - * @mongodb.server.release 3.6 - */ - ClientSession startSession(); - - /** - * Creates a client session. - * - *
<p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
      - * - * @param options the options for the client session - * @return the client session - * @mongodb.server.release 3.6 - */ - ClientSession startSession(ClientSessionOptions options); +public interface MongoClient extends MongoCluster, Closeable { /** * Close the client, which will close all underlying cached resources, including, for example, @@ -81,158 +45,6 @@ public interface MongoClient extends Closeable { */ void close(); - /** - * Get a list of the database names - * - * @return an iterable containing all the names of all the databases - * @mongodb.driver.manual reference/command/listDatabases List Databases - */ - MongoIterable listDatabaseNames(); - - /** - * Get a list of the database names - * - * @param clientSession the client session with which to associate this operation - * @return an iterable containing all the names of all the databases - * @mongodb.driver.manual reference/command/listDatabases List Databases - * @mongodb.server.release 3.6 - */ - MongoIterable listDatabaseNames(ClientSession clientSession); - - /** - * Gets the list of databases - * - * @return the list databases iterable interface - */ - ListDatabasesIterable listDatabases(); - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @return the list databases iterable interface - * @mongodb.driver.manual reference/command/listDatabases List Databases - * @mongodb.server.release 3.6 - */ - ListDatabasesIterable listDatabases(ClientSession clientSession); - - /** - * Gets the list of databases - * - * @param resultClass the class to cast the database documents to - * @param the type of the class to use instead of {@code Document}. - * @return the list databases iterable interface - */ - ListDatabasesIterable listDatabases(Class resultClass); - - /** - * Gets the list of databases - * - * @param clientSession the client session with which to associate this operation - * @param resultClass the class to cast the database documents to - * @param the type of the class to use instead of {@code Document}. - * @return the list databases iterable interface - * @mongodb.driver.manual reference/command/listDatabases List Databases - * @mongodb.server.release 3.6 - */ - ListDatabasesIterable listDatabases(ClientSession clientSession, Class resultClass); - - /** - * Creates a change stream for this client. - * - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 3.8 - * @mongodb.server.release 4.0 - */ - ChangeStreamIterable watch(); - - /** - * Creates a change stream for this client. - * - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 3.8 - * @mongodb.server.release 4.0 - */ - ChangeStreamIterable watch(Class resultClass); - - /** - * Creates a change stream for this client. - * - * @param pipeline the aggregation pipeline to apply to the change stream. - * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 3.8 - * @mongodb.server.release 4.0 - */ - ChangeStreamIterable watch(List pipeline); - - /** - * Creates a change stream for this client. - * - * @param pipeline the aggregation pipeline to apply to the change stream - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. 
- * @return the change stream iterable - * @mongodb.driver.dochub core/changestreams Change Streams - * @since 3.8 - * @mongodb.server.release 4.0 - */ - ChangeStreamIterable watch(List pipeline, Class resultClass); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @return the change stream iterable - * @since 3.8 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamIterable watch(ClientSession clientSession); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @since 3.8 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamIterable watch(ClientSession clientSession, Class resultClass); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream. - * @return the change stream iterable - * @since 3.8 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamIterable watch(ClientSession clientSession, List pipeline); - - /** - * Creates a change stream for this client. - * - * @param clientSession the client session with which to associate this operation - * @param pipeline the aggregation pipeline to apply to the change stream - * @param resultClass the class to decode each document into - * @param the target document type of the iterable. - * @return the change stream iterable - * @since 3.8 - * @mongodb.server.release 4.0 - * @mongodb.driver.dochub core/changestreams Change Streams - */ - ChangeStreamIterable watch(ClientSession clientSession, List pipeline, Class resultClass); - /** * Gets the current cluster description. * diff --git a/driver-sync/src/main/com/mongodb/client/MongoCluster.java b/driver-sync/src/main/com/mongodb/client/MongoCluster.java new file mode 100644 index 00000000000..f901845333b --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoCluster.java @@ -0,0 +1,355 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The client-side representation of a MongoDB cluster operations. + * + *
<p> + * The originating {@link MongoClient} is responsible for the closing of resources. + * If the originating {@link MongoClient} is closed, then any cluster operations will fail. + * </p>
      + * + * @see MongoClient + * @since 5.2 + */ +@Immutable +public interface MongoCluster { + + /** + * Get the codec registry for the MongoCluster. + * + * @return the {@link org.bson.codecs.configuration.CodecRegistry} + * @since 5.2 + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoCluster. + * + * @return the {@link com.mongodb.ReadPreference} + * @since 5.2 + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoCluster. + * + * @return the {@link com.mongodb.WriteConcern} + * @since 5.2 + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCluster. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *
<p>If not null the following deprecated options will be ignored: + * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p> + * + * <ul> + *  <li>{@code null} means that the timeout mechanism for operations will defer to using: + *   <ul> + *    <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become + *    available</li> + *    <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li> + *    <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li> + *    <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor. + *    See: cursor.maxTimeMS.</li> + *    <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute. + *    See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li> + *   </ul> + *  </li> + *  <li>{@code 0} means infinite timeout.</li> + *  <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
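
As a small illustration of how this accessor pairs with the withTimeout method declared further down in this interface (the three-second value is an assumption):

    static void printEffectiveTimeout(final MongoClient client) {
        MongoCluster bounded = client.withTimeout(3, TimeUnit.SECONDS);
        Long timeoutMs = bounded.getTimeout(TimeUnit.MILLISECONDS);
        // prints 3000; a null return would mean no timeoutMS is set and the legacy options listed above still apply
        System.out.println("timeoutMS = " + timeoutMs);
    }
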
      + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoCluster instance with a different codec registry. + * + *
<p>The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of + * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining + * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances + * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.</p>
      + * + * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + * @since 5.2 + */ + MongoCluster withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the database + * @return a new MongoCluster instance with the different readPreference + * @since 5.2 + */ + MongoCluster withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the database + * @return a new MongoCluster instance with the different writeConcern + * @since 5.2 + */ + MongoCluster withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the database + * @return a new MongoCluster instance with the different ReadConcern + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + MongoCluster withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * + *
<ul> + *  <li>{@code 0} means infinite timeout.</li> + *  <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
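
By way of example, a cluster-wide deadline combined with the new com.mongodb.MongoOperationTimeoutException from this series might be used roughly as follows; the one-second value and the namespace are assumptions:

    static long countWithDeadline(final MongoClient client) {
        MongoCluster bounded = client.withTimeout(1, TimeUnit.SECONDS);   // inherited by databases and collections derived from it
        try {
            return bounded.getDatabase("test").getCollection("orders").countDocuments();
        } catch (MongoOperationTimeoutException e) {
            // the full execution of countDocuments exceeded timeoutMS
            return -1L;
        }
    }
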
      + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCluster instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCluster withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets a {@link MongoDatabase} instance for the given database name. + * + * @param databaseName the name of the database to retrieve + * @return a {@code MongoDatabase} representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see MongoNamespace#checkDatabaseNameValidity(String) + */ + MongoDatabase getDatabase(String databaseName); + + /** + * Creates a client session with default options. + * + *
<p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
      + * + * @return the client session + * @mongodb.server.release 3.6 + */ + ClientSession startSession(); + + /** + * Creates a client session. + * + *
<p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
      + * + * @param options the options for the client session + * @return the client session + * @mongodb.server.release 3.6 + */ + ClientSession startSession(ClientSessionOptions options); + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + */ + MongoIterable listDatabaseNames(); + + /** + * Get a list of the database names + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + MongoIterable listDatabaseNames(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @return the list databases iterable interface + */ + ListDatabasesIterable listDatabases(); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesIterable listDatabases(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + */ + ListDatabasesIterable listDatabases(Class resultClass); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesIterable listDatabases(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(); + + /** + * Creates a change stream for this client. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(List pipeline); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. 
+ * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline, Class resultClass); +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoCollection.java b/driver-sync/src/main/com/mongodb/client/MongoCollection.java index aa772960e65..7db38040bed 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoCollection.java +++ b/driver-sync/src/main/com/mongodb/client/MongoCollection.java @@ -20,6 +20,8 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.ThreadSafe; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.client.model.BulkWriteOptions; @@ -51,6 +53,7 @@ import org.bson.conversions.Bson; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoCollection interface. @@ -112,6 +115,37 @@ public interface MongoCollection { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *
<p>If not null the following deprecated options will be ignored: + * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p> + * + * <ul> + *  <li>{@code null} means that the timeout mechanism for operations will defer to using: + *   <ul> + *    <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become + *    available</li> + *    <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li> + *    <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li> + *    <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor. + *    See: cursor.maxTimeMS.</li> + *    <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute. + *    See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li> + *   </ul> + *  </li> + *  <li>{@code 0} means infinite timeout.</li> + *  <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
      + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. * @@ -162,6 +196,23 @@ public interface MongoCollection { */ MongoCollection withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + *
<ul> + *  <li>{@code 0} means infinite timeout.</li> + *  <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
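
For instance, a single slow collection can be given a looser limit than the rest of the client; the names and the thirty-second value are illustrative, and db is assumed to be a MongoDatabase that already carries a shorter client- or database-level timeoutMS:

    MongoCollection<Document> reports = db.getCollection("reports")
            .withTimeout(30, TimeUnit.SECONDS);   // overrides the inherited setting for this handle only
    Document firstReport = reports.find().first();
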
      + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCollection instance with the set time limit for the full execution of an operation + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCollection withTimeout(long timeout, TimeUnit timeUnit); + /** * Counts the number of documents in the collection. * diff --git a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java index 364f7377d4a..1e84a91005a 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java +++ b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java @@ -19,14 +19,18 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.ThreadSafe; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoDatabase interface. @@ -76,6 +80,37 @@ public interface MongoDatabase { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *
<p>If not null the following deprecated options will be ignored: + * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p> + * + * <ul> + *  <li>{@code null} means that the timeout mechanism for operations will defer to using: + *   <ul> + *    <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become + *    available</li> + *    <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li> + *    <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li> + *    <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor. + *    See: cursor.maxTimeMS.</li> + *    <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute. + *    See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li> + *   </ul> + *  </li> + *  <li>{@code 0} means infinite timeout.</li> + *  <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
      + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -117,6 +152,23 @@ public interface MongoDatabase { */ MongoDatabase withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
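The database-level variant composes the same way; in this sketch it is assumed, mirroring how the other database-level settings propagate, that a collection obtained from the derived database inherits its timeout:

    import java.util.concurrent.TimeUnit;
    import com.mongodb.client.MongoCollection;
    import com.mongodb.client.MongoDatabase;
    import org.bson.Document;

    final class DatabaseTimeoutExample {
        static MongoCollection<Document> boundedCollection(final MongoDatabase database) {
            // Every operation on the returned collection is assumed to share the 1-second budget.
            return database.withTimeout(1, TimeUnit.SECONDS).getCollection("docs");
        }
    }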
      + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoDatabase instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoDatabase withTimeout(long timeout, TimeUnit timeUnit); + /** * Gets a collection. * @@ -140,6 +192,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

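Given that note, a sketch of the safer pattern is to bound the whole command through the client-side timeout rather than embedding maxTimeMS in the command document (the command and collection names are illustrative):

    import java.util.concurrent.TimeUnit;
    import com.mongodb.client.MongoDatabase;
    import org.bson.Document;

    final class RunCommandTimeoutExample {
        // Do not append a maxTimeMS field when timeoutMS is in effect; let the driver enforce the deadline.
        static Document countWithDeadline(final MongoDatabase database) {
            return database.withTimeout(500, TimeUnit.MILLISECONDS)
                    .runCommand(new Document("count", "docs"));
        }
    }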
      + * * @param command the command to be run * @return the command result */ @@ -148,6 +203,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command * @return the command result @@ -157,6 +215,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param command the command to be run * @param resultClass the class to decode each document into * @param the type of the class to use instead of {@code Document}. @@ -167,6 +228,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command * @param resultClass the class to decode each document into @@ -178,6 +242,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @return the command result @@ -189,6 +256,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command @@ -201,6 +271,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param resultClass the class to decode each document into @@ -214,6 +287,9 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

+     * Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.

      + * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command diff --git a/driver-sync/src/main/com/mongodb/client/MongoIterable.java b/driver-sync/src/main/com/mongodb/client/MongoIterable.java index 75ca9c34e6d..06bec548c77 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoIterable.java +++ b/driver-sync/src/main/com/mongodb/client/MongoIterable.java @@ -74,4 +74,5 @@ public interface MongoIterable extends Iterable { * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size */ MongoIterable batchSize(int batchSize); + } diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java index c32f114844c..5335ed4ce91 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java @@ -19,16 +19,21 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; import com.mongodb.annotations.ThreadSafe; import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoDatabase; import com.mongodb.client.gridfs.model.GridFSDownloadOptions; import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.lang.Nullable; import org.bson.BsonValue; import org.bson.conversions.Bson; import org.bson.types.ObjectId; import java.io.InputStream; import java.io.OutputStream; +import java.util.concurrent.TimeUnit; /** * Represents a GridFS Bucket @@ -76,6 +81,37 @@ public interface GridFSBucket { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *     <ul>
+     *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *       available</li>
+     *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *       See: cursor.maxTimeMS.</li>
+     *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *       See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *     </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
      + * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new GridFSBucket instance with a new chunk size in bytes. * @@ -111,6 +147,23 @@ public interface GridFSBucket { */ GridFSBucket withReadConcern(ReadConcern readConcern); + /** + * Create a new GridFSBucket instance with the set time limit for the full execution of an operation. + * + *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
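A usage sketch for GridFS; the bucket name and time budget are arbitrary, and GridFSBuckets.create is the existing factory in this module:

    import java.util.concurrent.TimeUnit;
    import com.mongodb.client.MongoDatabase;
    import com.mongodb.client.gridfs.GridFSBucket;
    import com.mongodb.client.gridfs.GridFSBuckets;

    final class GridFSTimeoutExample {
        static GridFSBucket boundedBucket(final MongoDatabase database) {
            // Metadata queries, chunk reads and chunk writes all draw from the same 30-second budget.
            return GridFSBuckets.create(database, "files").withTimeout(30, TimeUnit.SECONDS);
        }
    }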
      + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new GridFSBucket instance with the set time limit for the full execution of an operation + * @since 4.x + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + GridFSBucket withTimeout(long timeout, TimeUnit timeUnit); + /** * Opens a Stream that the application can write the contents of the file to. *

      @@ -296,6 +349,10 @@ public interface GridFSBucket { * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. *

      * +

+     * Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur because the {@link InputStream}
+     * lacks inherent read timeout support, which might extend the operation beyond the specified timeout limit.

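The caveat above concerns the source stream itself: the driver checks its deadline between chunk operations, but a single InputStream.read can block indefinitely. A sketch (the socket-backed stream is a hypothetical example) of where the overrun can happen:

    import java.io.InputStream;
    import java.net.Socket;
    import com.mongodb.client.gridfs.GridFSBucket;
    import com.mongodb.client.gridfs.model.GridFSUploadOptions;

    final class UploadTimeoutCaveat {
        static void upload(final GridFSBucket bucket, final Socket slowSource) throws Exception {
            try (InputStream in = slowSource.getInputStream()) {
                // If read() stalls here, the GridFS timeout cannot interrupt it; the breach is only
                // detected once control returns to the driver between chunks.
                bucket.uploadFromStream("report.bin", in, new GridFSUploadOptions());
            }
        }
    }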
      + * * @param id the custom id value of the file * @param filename the filename for the stream * @param source the Stream providing the file data @@ -310,6 +367,10 @@ public interface GridFSBucket { * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. *

      * +

+     * Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur because the {@link InputStream}
+     * lacks inherent read timeout support, which might extend the operation beyond the specified timeout limit.

      + * * @param id the custom id value of the file * @param filename the filename for the stream * @param source the Stream providing the file data @@ -325,6 +386,10 @@ public interface GridFSBucket { * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. *

      * +

+     * Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur because the {@link InputStream}
+     * lacks inherent read timeout support, which might extend the operation beyond the specified timeout limit.

      + * * @param clientSession the client session with which to associate this operation * @param filename the filename for the stream * @param source the Stream providing the file data @@ -341,6 +406,10 @@ public interface GridFSBucket { * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. *

      * +

+     * Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur because the {@link InputStream}
+     * lacks inherent read timeout support, which might extend the operation beyond the specified timeout limit.

      + * * @param clientSession the client session with which to associate this operation * @param filename the filename for the stream * @param source the Stream providing the file data @@ -358,6 +427,10 @@ public interface GridFSBucket { * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. *

      * +

+     * Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur because the {@link InputStream}
+     * lacks inherent read timeout support, which might extend the operation beyond the specified timeout limit.

      + * * @param clientSession the client session with which to associate this operation * @param id the custom id value of the file * @param filename the filename for the stream @@ -374,6 +447,10 @@ public interface GridFSBucket { * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. *

      * +

+     * Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur because the {@link InputStream}
+     * lacks inherent read timeout support, which might extend the operation beyond the specified timeout limit.

      + * * @param clientSession the client session with which to associate this operation * @param id the custom id value of the file * @param filename the filename for the stream diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java index 963093af6f7..20ac8fc6d44 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java @@ -18,6 +18,7 @@ import com.mongodb.MongoClientSettings; import com.mongodb.MongoGridFSException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; @@ -26,12 +27,17 @@ import com.mongodb.client.ListIndexesIterable; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoDatabase; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.gridfs.model.GridFSDownloadOptions; import com.mongodb.client.gridfs.model.GridFSFile; import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.client.internal.TimeoutHelper; import com.mongodb.client.model.IndexOptions; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonObjectId; @@ -46,14 +52,17 @@ import java.io.OutputStream; import java.util.ArrayList; import java.util.Map; +import java.util.concurrent.TimeUnit; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.notNull; import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; final class GridFSBucketImpl implements GridFSBucket { private static final int DEFAULT_CHUNKSIZE_BYTES = 255 * 1024; + private static final String TIMEOUT_MESSAGE = "GridFS operation exceeded the timeout limit."; private final String bucketName; private final int chunkSizeBytes; private final MongoCollection filesCollection; @@ -70,6 +79,7 @@ final class GridFSBucketImpl implements GridFSBucket { getChunksCollection(database, bucketName)); } + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection filesCollection, final MongoCollection chunksCollection) { this.bucketName = notNull("bucketName", bucketName); @@ -103,6 +113,11 @@ public ReadConcern getReadConcern() { return filesCollection.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return filesCollection.getTimeout(timeUnit); + } + @Override public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) { return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection, chunksCollection); @@ -126,6 +141,12 @@ public GridFSBucket withReadConcern(final ReadConcern readConcern) { chunksCollection.withReadConcern(readConcern)); } + @Override + public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withTimeout(timeout, timeUnit), + chunksCollection.withTimeout(timeout, timeUnit)); + } + @Override public GridFSUploadStream openUploadStream(final 
String filename) { return openUploadStream(new BsonObjectId(), filename); @@ -176,12 +197,14 @@ public GridFSUploadStream openUploadStream(final ClientSession clientSession, fi private GridFSUploadStream createGridFSUploadStream(@Nullable final ClientSession clientSession, final BsonValue id, final String filename, final GridFSUploadOptions options) { + Timeout operationTimeout = startTimeout(); notNull("options", options); Integer chunkSizeBytes = options.getChunkSizeBytes(); int chunkSize = chunkSizeBytes == null ? this.chunkSizeBytes : chunkSizeBytes; - checkCreateIndex(clientSession); - return new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, id, filename, chunkSize, - options.getMetadata()); + checkCreateIndex(clientSession, operationTimeout); + return new GridFSUploadStreamImpl(clientSession, filesCollection, + chunksCollection, id, filename, chunkSize, + options.getMetadata(), operationTimeout); } @Override @@ -257,7 +280,10 @@ public GridFSDownloadStream openDownloadStream(final ObjectId id) { @Override public GridFSDownloadStream openDownloadStream(final BsonValue id) { - return createGridFSDownloadStream(null, getFileInfoById(null, id)); + Timeout operationTimeout = startTimeout(); + + GridFSFile fileInfo = getFileInfoById(null, id, operationTimeout); + return createGridFSDownloadStream(null, fileInfo, operationTimeout); } @Override @@ -267,7 +293,9 @@ public GridFSDownloadStream openDownloadStream(final String filename) { @Override public GridFSDownloadStream openDownloadStream(final String filename, final GridFSDownloadOptions options) { - return createGridFSDownloadStream(null, getFileByName(null, filename, options)); + Timeout operationTimeout = startTimeout(); + GridFSFile file = getFileByName(null, filename, options, operationTimeout); + return createGridFSDownloadStream(null, file, operationTimeout); } @Override @@ -278,7 +306,9 @@ public GridFSDownloadStream openDownloadStream(final ClientSession clientSession @Override public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final BsonValue id) { notNull("clientSession", clientSession); - return createGridFSDownloadStream(clientSession, getFileInfoById(clientSession, id)); + Timeout operationTimeout = startTimeout(); + GridFSFile fileInfoById = getFileInfoById(clientSession, id, operationTimeout); + return createGridFSDownloadStream(clientSession, fileInfoById, operationTimeout); } @Override @@ -290,11 +320,14 @@ public GridFSDownloadStream openDownloadStream(final ClientSession clientSession public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final String filename, final GridFSDownloadOptions options) { notNull("clientSession", clientSession); - return createGridFSDownloadStream(clientSession, getFileByName(clientSession, filename, options)); + Timeout operationTimeout = startTimeout(); + GridFSFile file = getFileByName(clientSession, filename, options, operationTimeout); + return createGridFSDownloadStream(clientSession, file, operationTimeout); } - private GridFSDownloadStream createGridFSDownloadStream(@Nullable final ClientSession clientSession, final GridFSFile gridFSFile) { - return new GridFSDownloadStreamImpl(clientSession, gridFSFile, chunksCollection); + private GridFSDownloadStream createGridFSDownloadStream(@Nullable final ClientSession clientSession, final GridFSFile gridFSFile, + @Nullable final Timeout operationTimeout) { + return new GridFSDownloadStreamImpl(clientSession, gridFSFile, chunksCollection, operationTimeout); } 
@Override @@ -365,7 +398,12 @@ public GridFSFindIterable find(final ClientSession clientSession, final Bson fil } private GridFSFindIterable createGridFSFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter) { - return new GridFSFindIterableImpl(createFindIterable(clientSession, filter)); + return new GridFSFindIterableImpl(createFindIterable(clientSession, filter, startTimeout())); + } + + private GridFSFindIterable createGridFSFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter, + @Nullable final Timeout operationTimeout) { + return new GridFSFindIterableImpl(createFindIterable(clientSession, filter, operationTimeout)); } @Override @@ -390,13 +428,18 @@ public void delete(final ClientSession clientSession, final BsonValue id) { } private void executeDelete(@Nullable final ClientSession clientSession, final BsonValue id) { + Timeout operationTimeout = startTimeout(); DeleteResult result; if (clientSession != null) { - result = filesCollection.deleteOne(clientSession, new BsonDocument("_id", id)); - chunksCollection.deleteMany(clientSession, new BsonDocument("files_id", id)); + result = withNullableTimeout(filesCollection, operationTimeout) + .deleteOne(clientSession, new BsonDocument("_id", id)); + withNullableTimeout(chunksCollection, operationTimeout) + .deleteMany(clientSession, new BsonDocument("files_id", id)); } else { - result = filesCollection.deleteOne(new BsonDocument("_id", id)); - chunksCollection.deleteMany(new BsonDocument("files_id", id)); + result = withNullableTimeout(filesCollection, operationTimeout) + .deleteOne(new BsonDocument("_id", id)); + withNullableTimeout(chunksCollection, operationTimeout) + .deleteMany(new BsonDocument("files_id", id)); } if (result.wasAcknowledged() && result.getDeletedCount() == 0) { @@ -426,12 +469,13 @@ public void rename(final ClientSession clientSession, final BsonValue id, final } private void executeRename(@Nullable final ClientSession clientSession, final BsonValue id, final String newFilename) { + Timeout operationTimeout = startTimeout(); UpdateResult updateResult; if (clientSession != null) { - updateResult = filesCollection.updateOne(clientSession, new BsonDocument("_id", id), + updateResult = withNullableTimeout(filesCollection, operationTimeout).updateOne(clientSession, new BsonDocument("_id", id), new BsonDocument("$set", new BsonDocument("filename", new BsonString(newFilename)))); } else { - updateResult = filesCollection.updateOne(new BsonDocument("_id", id), + updateResult = withNullableTimeout(filesCollection, operationTimeout).updateOne(new BsonDocument("_id", id), new BsonDocument("$set", new BsonDocument("filename", new BsonString(newFilename)))); } @@ -442,15 +486,17 @@ private void executeRename(@Nullable final ClientSession clientSession, final Bs @Override public void drop() { - filesCollection.drop(); - chunksCollection.drop(); + Timeout operationTimeout = startTimeout(); + withNullableTimeout(filesCollection, operationTimeout).drop(); + withNullableTimeout(chunksCollection, operationTimeout).drop(); } @Override public void drop(final ClientSession clientSession) { + Timeout operationTimeout = startTimeout(); notNull("clientSession", clientSession); - filesCollection.drop(clientSession); - chunksCollection.drop(clientSession); + withNullableTimeout(filesCollection, operationTimeout).drop(clientSession); + withNullableTimeout(chunksCollection, operationTimeout).drop(clientSession); } private static MongoCollection getFilesCollection(final 
MongoDatabase database, final String bucketName) { @@ -463,37 +509,45 @@ private static MongoCollection getChunksCollection(final MongoData return database.getCollection(bucketName + ".chunks", BsonDocument.class).withCodecRegistry(MongoClientSettings.getDefaultCodecRegistry()); } - private void checkCreateIndex(@Nullable final ClientSession clientSession) { + private void checkCreateIndex(@Nullable final ClientSession clientSession, @Nullable final Timeout operationTimeout) { if (!checkedIndexes) { - if (collectionIsEmpty(clientSession, filesCollection.withDocumentClass(Document.class).withReadPreference(primary()))) { + if (collectionIsEmpty(clientSession, + filesCollection.withDocumentClass(Document.class).withReadPreference(primary()), + operationTimeout)) { + Document filesIndex = new Document("filename", 1).append("uploadDate", 1); - if (!hasIndex(clientSession, filesCollection.withReadPreference(primary()), filesIndex)) { - createIndex(clientSession, filesCollection, filesIndex, new IndexOptions()); + if (!hasIndex(clientSession, filesCollection.withReadPreference(primary()), filesIndex, operationTimeout)) { + createIndex(clientSession, filesCollection, filesIndex, new IndexOptions(), operationTimeout); } Document chunksIndex = new Document("files_id", 1).append("n", 1); - if (!hasIndex(clientSession, chunksCollection.withReadPreference(primary()), chunksIndex)) { - createIndex(clientSession, chunksCollection, chunksIndex, new IndexOptions().unique(true)); + if (!hasIndex(clientSession, chunksCollection.withReadPreference(primary()), chunksIndex, operationTimeout)) { + createIndex(clientSession, chunksCollection, chunksIndex, new IndexOptions().unique(true), operationTimeout); } } checkedIndexes = true; } } - private boolean collectionIsEmpty(@Nullable final ClientSession clientSession, final MongoCollection collection) { + private boolean collectionIsEmpty(@Nullable final ClientSession clientSession, + final MongoCollection collection, + @Nullable final Timeout operationTimeout) { if (clientSession != null) { - return collection.find(clientSession).projection(new Document("_id", 1)).first() == null; + return withNullableTimeout(collection, operationTimeout) + .find(clientSession).projection(new Document("_id", 1)).first() == null; } else { - return collection.find().projection(new Document("_id", 1)).first() == null; + return withNullableTimeout(collection, operationTimeout) + .find().projection(new Document("_id", 1)).first() == null; } } - private boolean hasIndex(@Nullable final ClientSession clientSession, final MongoCollection collection, final Document index) { + private boolean hasIndex(@Nullable final ClientSession clientSession, final MongoCollection collection, + final Document index, @Nullable final Timeout operationTimeout) { boolean hasIndex = false; ListIndexesIterable listIndexesIterable; if (clientSession != null) { - listIndexesIterable = collection.listIndexes(clientSession); + listIndexesIterable = withNullableTimeout(collection, operationTimeout).listIndexes(clientSession); } else { - listIndexesIterable = collection.listIndexes(); + listIndexesIterable = withNullableTimeout(collection, operationTimeout).listIndexes(); } ArrayList indexes = listIndexesIterable.into(new ArrayList<>()); @@ -513,16 +567,16 @@ private boolean hasIndex(@Nullable final ClientSession clientSession, final } private void createIndex(@Nullable final ClientSession clientSession, final MongoCollection collection, final Document index, - final IndexOptions indexOptions) { - if (clientSession 
!= null) { - collection.createIndex(clientSession, index, indexOptions); - } else { - collection.createIndex(index, indexOptions); - } + final IndexOptions indexOptions, final @Nullable Timeout operationTimeout) { + if (clientSession != null) { + withNullableTimeout(collection, operationTimeout).createIndex(clientSession, index, indexOptions); + } else { + withNullableTimeout(collection, operationTimeout).createIndex(index, indexOptions); + } } private GridFSFile getFileByName(@Nullable final ClientSession clientSession, final String filename, - final GridFSDownloadOptions options) { + final GridFSDownloadOptions options, @Nullable final Timeout operationTimeout) { int revision = options.getRevision(); int skip; int sort; @@ -534,7 +588,7 @@ private GridFSFile getFileByName(@Nullable final ClientSession clientSession, fi sort = -1; } - GridFSFile fileInfo = createGridFSFindIterable(clientSession, new Document("filename", filename)).skip(skip) + GridFSFile fileInfo = createGridFSFindIterable(clientSession, new Document("filename", filename), operationTimeout).skip(skip) .sort(new Document("uploadDate", sort)).first(); if (fileInfo == null) { throw new MongoGridFSException(format("No file found with the filename: %s and revision: %s", filename, revision)); @@ -542,25 +596,30 @@ private GridFSFile getFileByName(@Nullable final ClientSession clientSession, fi return fileInfo; } - private GridFSFile getFileInfoById(@Nullable final ClientSession clientSession, final BsonValue id) { + private GridFSFile getFileInfoById(@Nullable final ClientSession clientSession, final BsonValue id, + @Nullable final Timeout operationTImeout) { notNull("id", id); - GridFSFile fileInfo = createFindIterable(clientSession, new Document("_id", id)).first(); + GridFSFile fileInfo = createFindIterable(clientSession, new Document("_id", id), operationTImeout).first(); if (fileInfo == null) { throw new MongoGridFSException(format("No file found with the id: %s", id)); } return fileInfo; } - private FindIterable createFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter) { + private FindIterable createFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter, + @Nullable final Timeout operationTImeout) { FindIterable findIterable; if (clientSession != null) { - findIterable = filesCollection.find(clientSession); + findIterable = withNullableTimeout(filesCollection, operationTImeout).find(clientSession); } else { - findIterable = filesCollection.find(); + findIterable = withNullableTimeout(filesCollection, operationTImeout).find(); } if (filter != null) { findIterable = findIterable.filter(filter); } + if (filesCollection.getTimeout(MILLISECONDS) != null) { + findIterable.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } return findIterable; } @@ -572,6 +631,8 @@ private void downloadToStream(final GridFSDownloadStream downloadStream, final O while ((len = downloadStream.read(buffer)) != -1) { destination.write(buffer, 0, len); } + } catch (MongoOperationTimeoutException e){ + throw e; } catch (IOException e) { savedThrowable = new MongoGridFSException("IOException when reading from the OutputStream", e); } catch (Exception e) { @@ -587,4 +648,14 @@ private void downloadToStream(final GridFSDownloadStream downloadStream, final O } } } + + private static MongoCollection withNullableTimeout(final MongoCollection chunksCollection, + @Nullable final Timeout timeout) { + return TimeoutHelper.collectionWithTimeout(chunksCollection, TIMEOUT_MESSAGE, timeout); + } + + 
@Nullable + private Timeout startTimeout() { + return TimeoutContext.startTimeout(filesCollection.getTimeout(MILLISECONDS)); + } } diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java index c9f6607d144..709ae68138b 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java @@ -21,7 +21,10 @@ import com.mongodb.client.FindIterable; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoCursor; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.internal.TimeoutHelper; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -33,13 +36,18 @@ import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.Locks.withInterruptibleLock; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; import static java.lang.String.format; class GridFSDownloadStreamImpl extends GridFSDownloadStream { + private static final String TIMEOUT_MESSAGE = "The GridFS download stream exceeded the timeout limit."; private final ClientSession clientSession; private final GridFSFile fileInfo; private final MongoCollection chunksCollection; private final BsonValue fileId; + /** + * The length, in bytes of the file to download. + */ private final long length; private final int chunkSizeInBytes; private final int numberOfChunks; @@ -47,16 +55,20 @@ class GridFSDownloadStreamImpl extends GridFSDownloadStream { private int batchSize; private int chunkIndex; private int bufferOffset; + /** + * Current byte position in the file. 
+ */ private long currentPosition; private byte[] buffer = null; private long markPosition; - + @Nullable + private final Timeout timeout; private final ReentrantLock closeLock = new ReentrantLock(); private final ReentrantLock cursorLock = new ReentrantLock(); private boolean closed = false; GridFSDownloadStreamImpl(@Nullable final ClientSession clientSession, final GridFSFile fileInfo, - final MongoCollection chunksCollection) { + final MongoCollection chunksCollection, @Nullable final Timeout timeout) { this.clientSession = clientSession; this.fileInfo = notNull("file information", fileInfo); this.chunksCollection = notNull("chunks collection", chunksCollection); @@ -65,6 +77,7 @@ class GridFSDownloadStreamImpl extends GridFSDownloadStream { length = fileInfo.getLength(); chunkSizeInBytes = fileInfo.getChunkSize(); numberOfChunks = (int) Math.ceil((double) length / chunkSizeInBytes); + this.timeout = timeout; } @Override @@ -98,6 +111,7 @@ public int read(final byte[] b) { @Override public int read(final byte[] b, final int off, final int len) { checkClosed(); + checkTimeout(); if (currentPosition == length) { return -1; @@ -119,6 +133,7 @@ public int read(final byte[] b, final int off, final int len) { @Override public long skip(final long bytesToSkip) { checkClosed(); + checkTimeout(); if (bytesToSkip <= 0) { return 0; } @@ -147,6 +162,7 @@ public long skip(final long bytesToSkip) { @Override public int available() { checkClosed(); + checkTimeout(); if (buffer == null) { return 0; } else { @@ -167,6 +183,7 @@ public void mark(final int readlimit) { @Override public void reset() { checkClosed(); + checkTimeout(); if (currentPosition == markPosition) { return; } @@ -196,6 +213,11 @@ public void close() { }); } + private void checkTimeout() { + Timeout.onExistsAndExpired(timeout, () -> { + throw createMongoTimeoutException(TIMEOUT_MESSAGE); + }); + } private void checkClosed() { withInterruptibleLock(closeLock, () -> { if (closed) { @@ -237,11 +259,15 @@ private MongoCursor getCursor(final int startChunkIndex) { FindIterable findIterable; BsonDocument filter = new BsonDocument("files_id", fileId).append("n", new BsonDocument("$gte", new BsonInt32(startChunkIndex))); if (clientSession != null) { - findIterable = chunksCollection.find(clientSession, filter); + findIterable = withNullableTimeout(chunksCollection, timeout).find(clientSession, filter); } else { - findIterable = chunksCollection.find(filter); + findIterable = withNullableTimeout(chunksCollection, timeout).find(filter); } - return findIterable.batchSize(batchSize).sort(new BsonDocument("n", new BsonInt32(1))).iterator(); + if (timeout != null){ + findIterable.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } + return findIterable.batchSize(batchSize) + .sort(new BsonDocument("n", new BsonInt32(1))).iterator(); } private byte[] getBufferFromChunk(@Nullable final BsonDocument chunk, final int expectedChunkIndex) { @@ -280,4 +306,9 @@ private byte[] getBufferFromChunk(@Nullable final BsonDocument chunk, final int private byte[] getBuffer(final int chunkIndexToFetch) { return getBufferFromChunk(getChunk(chunkIndexToFetch), chunkIndexToFetch); } + + private MongoCollection withNullableTimeout(final MongoCollection chunksCollection, + @Nullable final Timeout timeout) { + return TimeoutHelper.collectionWithTimeout(chunksCollection, TIMEOUT_MESSAGE, timeout); + } } diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java index 
26ef5f85934..240cecf78b3 100644 --- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java @@ -20,6 +20,9 @@ import com.mongodb.client.ClientSession; import com.mongodb.client.MongoCollection; import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.internal.TimeoutHelper; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -35,6 +38,7 @@ import static com.mongodb.internal.Locks.withInterruptibleLock; final class GridFSUploadStreamImpl extends GridFSUploadStream { + public static final String TIMEOUT_MESSAGE = "The GridFS upload stream exceeded the timeout limit."; private final ClientSession clientSession; private final MongoCollection filesCollection; private final MongoCollection chunksCollection; @@ -46,13 +50,14 @@ final class GridFSUploadStreamImpl extends GridFSUploadStream { private long lengthInBytes; private int bufferOffset; private int chunkIndex; - + @Nullable + private final Timeout timeout; private final ReentrantLock closeLock = new ReentrantLock(); private boolean closed = false; GridFSUploadStreamImpl(@Nullable final ClientSession clientSession, final MongoCollection filesCollection, final MongoCollection chunksCollection, final BsonValue fileId, final String filename, - final int chunkSizeBytes, @Nullable final Document metadata) { + final int chunkSizeBytes, @Nullable final Document metadata, @Nullable final Timeout timeout) { this.clientSession = clientSession; this.filesCollection = notNull("files collection", filesCollection); this.chunksCollection = notNull("chunks collection", chunksCollection); @@ -63,6 +68,7 @@ final class GridFSUploadStreamImpl extends GridFSUploadStream { chunkIndex = 0; bufferOffset = 0; buffer = new byte[chunkSizeBytes]; + this.timeout = timeout; } @Override @@ -86,9 +92,11 @@ public void abort() { }); if (clientSession != null) { - chunksCollection.deleteMany(clientSession, new Document("files_id", fileId)); + withNullableTimeout(chunksCollection, timeout) + .deleteMany(clientSession, new Document("files_id", fileId)); } else { - chunksCollection.deleteMany(new Document("files_id", fileId)); + withNullableTimeout(chunksCollection, timeout) + .deleteMany(new Document("files_id", fileId)); } } @@ -107,6 +115,7 @@ public void write(final byte[] b) { @Override public void write(final byte[] b, final int off, final int len) { checkClosed(); + checkTimeout(); notNull("b", b); if ((off < 0) || (off > b.length) || (len < 0) @@ -138,6 +147,10 @@ public void write(final byte[] b, final int off, final int len) { } } + private void checkTimeout() { + Timeout.onExistsAndExpired(timeout, () -> TimeoutContext.throwMongoTimeoutException(TIMEOUT_MESSAGE)); + } + @Override public void close() { boolean alreadyClosed = withInterruptibleLock(closeLock, () -> { @@ -152,9 +165,9 @@ public void close() { GridFSFile gridFSFile = new GridFSFile(fileId, filename, lengthInBytes, chunkSizeBytes, new Date(), metadata); if (clientSession != null) { - filesCollection.insertOne(clientSession, gridFSFile); + withNullableTimeout(filesCollection, timeout).insertOne(clientSession, gridFSFile); } else { - filesCollection.insertOne(gridFSFile); + withNullableTimeout(filesCollection, timeout).insertOne(gridFSFile); } buffer = null; } @@ -162,10 +175,15 @@ public void close() { private void writeChunk() { if (bufferOffset > 0) { 
if (clientSession != null) { - chunksCollection.insertOne(clientSession, new BsonDocument("files_id", fileId).append("n", new BsonInt32(chunkIndex)) - .append("data", getData())); + withNullableTimeout(chunksCollection, timeout) + .insertOne(clientSession, new BsonDocument("files_id", fileId) + .append("n", new BsonInt32(chunkIndex)) + .append("data", getData())); } else { - chunksCollection.insertOne(new BsonDocument("files_id", fileId).append("n", new BsonInt32(chunkIndex)).append("data", getData())); + withNullableTimeout(chunksCollection, timeout) + .insertOne(new BsonDocument("files_id", fileId) + .append("n", new BsonInt32(chunkIndex)) + .append("data", getData())); } chunkIndex++; bufferOffset = 0; @@ -188,4 +206,9 @@ private void checkClosed() { } }); } + + private static MongoCollection withNullableTimeout(final MongoCollection collection, + @Nullable final Timeout timeout) { + return TimeoutHelper.collectionWithTimeout(collection, TIMEOUT_MESSAGE, timeout); + } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java index 6559e029d4e..23c8fb35283 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java @@ -23,7 +23,9 @@ import com.mongodb.WriteConcern; import com.mongodb.client.AggregateIterable; import com.mongodb.client.ClientSession; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.BatchCursor; @@ -62,29 +64,25 @@ class AggregateIterableImpl extends MongoIterableImpl documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final List pipeline, final AggregationLevel aggregationLevel) { + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads, + final TimeoutSettings timeoutSettings) { this(clientSession, new MongoNamespace(databaseName, "ignored"), documentClass, resultClass, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, aggregationLevel, true); - } - - AggregateIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads) { - this(clientSession, new MongoNamespace(databaseName, "ignored"), documentClass, resultClass, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads); + readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads, timeoutSettings); } + @SuppressWarnings("checkstyle:ParameterNumber") AggregateIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, 
final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads, + final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - true, retryReads); + true, retryReads, timeoutSettings); this.namespace = notNull("namespace", namespace); this.documentClass = notNull("documentClass", documentClass); this.resultClass = notNull("resultClass", resultClass); @@ -100,8 +98,10 @@ public void toCollection() { throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge"); } - getExecutor().execute(operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, - hintString, comment, variables, aggregationLevel), getReadPreference(), getReadConcern(), getClientSession()); + getExecutor().execute( + operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, + bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), + getReadPreference(), getReadConcern(), getClientSession()); } @Override @@ -116,6 +116,12 @@ public AggregateIterable batchSize(final int batchSize) { return this; } + @Override + public AggregateIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public AggregateIterable maxTime(final long maxTime, final TimeUnit timeUnit) { notNull("timeUnit", timeUnit); @@ -125,8 +131,7 @@ public AggregateIterable maxTime(final long maxTime, final TimeUnit tim @Override public AggregateIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); return this; } @@ -194,16 +199,20 @@ public E explain(final Class explainResultClass, final ExplainVerbosity v private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getExecutor().execute(asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), - getReadPreference(), getReadConcern(), getClientSession()); + return getExecutor().execute( + asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), + getReadConcern(), getClientSession()); } @Override public ReadOperation> asReadOperation() { MongoNamespace outNamespace = getOutNamespace(); if (outNamespace != null) { - getExecutor().execute(operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, - hint, hintString, comment, variables, aggregationLevel), 
getReadPreference(), getReadConcern(), getClientSession()); + validateTimeoutMode(); + getExecutor().execute( + operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, + bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), + getReadPreference(), getReadConcern(), getClientSession()); FindOptions findOptions = new FindOptions().collation(collation); Integer batchSize = getBatchSize(); @@ -216,9 +225,13 @@ public ReadOperation> asReadOperation() { } } + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS)); + } + private ExplainableReadOperation> asAggregateOperation() { - return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, getBatchSize(), collation, - hint, hintString, comment, variables, allowDiskUse, aggregationLevel); + return operations.aggregate(pipeline, resultClass, getTimeoutMode(), getBatchSize(), collation, hint, hintString, comment, + variables, allowDiskUse, aggregationLevel); } @Nullable @@ -269,4 +282,11 @@ private MongoNamespace getOutNamespace() { return null; } + + private void validateTimeoutMode() { + if (getTimeoutMode() == TimeoutMode.ITERATION) { + throw new IllegalArgumentException("Aggregations that output to a collection do not support the ITERATION value for the " + + "timeoutMode option."); + } + } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java index d50b20cf0e9..4b7b3865569 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java @@ -28,6 +28,7 @@ import com.mongodb.client.model.changestream.ChangeStreamDocument; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ReadOperation; @@ -47,7 +48,6 @@ import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

      This class is not part of the public API and may be removed or changed at any time

      @@ -70,23 +70,23 @@ public class ChangeStreamIterableImpl extends MongoIterableImpl pipeline, final Class resultClass, - final ChangeStreamLevel changeStreamLevel, final boolean retryReads) { + final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, + final OperationExecutor executor, final List pipeline, final Class resultClass, + final ChangeStreamLevel changeStreamLevel, final boolean retryReads, final TimeoutSettings timeoutSettings) { this(clientSession, new MongoNamespace(databaseName, "ignored"), codecRegistry, readPreference, readConcern, executor, pipeline, - resultClass, changeStreamLevel, retryReads); + resultClass, changeStreamLevel, retryReads, timeoutSettings); } public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final OperationExecutor executor, final List pipeline, final Class resultClass, - final ChangeStreamLevel changeStreamLevel, final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); + final ChangeStreamLevel changeStreamLevel, final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); this.codecRegistry = notNull("codecRegistry", codecRegistry); this.pipeline = notNull("pipeline", pipeline); this.codec = ChangeStreamDocument.createCodec(notNull("resultClass", resultClass), codecRegistry); this.changeStreamLevel = notNull("changeStreamLevel", changeStreamLevel); - this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads); + this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads, timeoutSettings); } @Override @@ -115,8 +115,7 @@ public ChangeStreamIterable batchSize(final int batchSize) { @Override public ChangeStreamIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxAwaitTimeMS = MILLISECONDS.convert(maxAwaitTime, timeUnit); + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); return this; } @@ -128,7 +127,8 @@ public ChangeStreamIterable collation(@Nullable final Collation collati @Override public MongoIterable withDocumentClass(final Class clazz) { - return new MongoIterableImpl(getClientSession(), getExecutor(), getReadConcern(), getReadPreference(), getRetryReads()) { + return new MongoIterableImpl(getClientSession(), getExecutor(), getReadConcern(), getReadPreference(), getRetryReads(), + getTimeoutSettings()) { @Override public MongoCursor iterator() { return cursor(); @@ -143,6 +143,12 @@ public MongoChangeStreamCursor cursor() { public ReadOperation> asReadOperation() { throw new UnsupportedOperationException(); } + + @Override + + protected OperationExecutor getExecutor() { + return ChangeStreamIterableImpl.this.getExecutor(); + } }; } @@ -203,9 +209,14 @@ public ReadOperation>> asReadOperation throw new UnsupportedOperationException(); } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + private ReadOperation> createChangeStreamOperation() { return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, new RawBsonDocumentCodec(), changeStreamLevel, - getBatchSize(), collation, comment, maxAwaitTimeMS, resumeToken, 
startAtOperationTime, startAfter, showExpandedEvents); + getBatchSize(), collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } private BatchCursor execute() { diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java index fad1c711d64..3edef6b937d 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java @@ -22,6 +22,7 @@ import com.mongodb.MongoUpdatedEncryptedFieldsException; import com.mongodb.ReadConcern; import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteResult; import com.mongodb.client.FindIterable; import com.mongodb.client.MongoClient; import com.mongodb.client.MongoClients; @@ -38,7 +39,10 @@ import com.mongodb.client.model.vault.RewrapManyDataKeyResult; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -54,11 +58,14 @@ import java.util.stream.Collectors; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.internal.BsonUtil.mutableDeepCopy; /** @@ -80,10 +87,22 @@ public ClientEncryptionImpl(final MongoClient keyVaultClient, final ClientEncryp this.crypt = Crypts.create(keyVaultClient, options); this.options = options; MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace()); - this.collection = keyVaultClient.getDatabase(namespace.getDatabaseName()) + this.collection = getVaultCollection(keyVaultClient, options, namespace); + } + + private static MongoCollection getVaultCollection(final MongoClient keyVaultClient, + final ClientEncryptionSettings options, + final MongoNamespace namespace) { + MongoCollection vaultCollection = keyVaultClient.getDatabase(namespace.getDatabaseName()) .getCollection(namespace.getCollectionName(), BsonDocument.class) .withWriteConcern(WriteConcern.MAJORITY) .withReadConcern(ReadConcern.MAJORITY); + + Long timeoutMs = options.getTimeout(MILLISECONDS); + if (timeoutMs != null){ + vaultCollection = vaultCollection.withTimeout(timeoutMs, MILLISECONDS); + } + return vaultCollection; } @Override @@ -93,39 +112,48 @@ public BsonBinary createDataKey(final String kmsProvider) { @Override public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) { - BsonDocument dataKeyDocument = crypt.createDataKey(kmsProvider, dataKeyOptions); - collection.insertOne(dataKeyDocument); + Timeout operationTimeout = startTimeout(); + return createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + } + + public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions, @Nullable final Timeout 
operationTimeout) { + BsonDocument dataKeyDocument = crypt.createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + collectionWithTimeout(collection, "Data key insertion exceeded the timeout limit.", operationTimeout).insertOne(dataKeyDocument); return dataKeyDocument.getBinary("_id"); } @Override public BsonBinary encrypt(final BsonValue value, final EncryptOptions options) { - return crypt.encryptExplicitly(value, options); + Timeout operationTimeout = startTimeout(); + return crypt.encryptExplicitly(value, options, operationTimeout); } @Override public BsonDocument encryptExpression(final Bson expression, final EncryptOptions options) { - return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options); + Timeout operationTimeout = startTimeout(); + return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options, + operationTimeout); } @Override public BsonValue decrypt(final BsonBinary value) { - return crypt.decryptExplicitly(value); + Timeout operationTimeout = startTimeout(); + return crypt.decryptExplicitly(value, operationTimeout); } @Override public DeleteResult deleteKey(final BsonBinary id) { - return collection.deleteOne(Filters.eq("_id", id)); + return collectionWithTimeout(collection, startTimeout()).deleteOne(Filters.eq("_id", id)); } @Override public BsonDocument getKey(final BsonBinary id) { - return collection.find(Filters.eq("_id", id)).first(); + return collectionWithTimeout(collection, startTimeout()).find(Filters.eq("_id", id)).first(); } @Override public FindIterable getKeys() { - return collection.find(); + return collectionWithTimeout(collection, startTimeout()).find(); } @Override @@ -170,7 +198,9 @@ public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter) { @Override public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) { validateRewrapManyDataKeyOptions(options); - BsonDocument results = crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options); + Timeout operationTimeout = startTimeout(); + BsonDocument results = crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), + options, operationTimeout); if (results.isEmpty()) { return new RewrapManyDataKeyResult(); } @@ -183,7 +213,8 @@ public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter, final Rewrap Updates.currentDate("updateDate")) ); }).collect(Collectors.toList()); - return new RewrapManyDataKeyResult(collection.bulkWrite(updateModels)); + BulkWriteResult bulkWriteResult = collectionWithTimeout(collection, operationTimeout).bulkWrite(updateModels); + return new RewrapManyDataKeyResult(bulkWriteResult); } @Override @@ -192,6 +223,7 @@ public BsonDocument createEncryptedCollection(final MongoDatabase database, fina notNull("collectionName", collectionName); notNull("createCollectionOptions", createCollectionOptions); notNull("createEncryptedCollectionParams", createEncryptedCollectionParams); + Timeout operationTimeout = startTimeout(); MongoNamespace namespace = new MongoNamespace(database.getName(), collectionName); Bson rawEncryptedFields = createCollectionOptions.getEncryptedFields(); if (rawEncryptedFields == null) { @@ -222,10 +254,10 @@ public BsonDocument createEncryptedCollection(final MongoDatabase database, fina // It is crucial to set the `dataKeyMightBeCreated` flag either immediately before calling `createDataKey`, // 
or after that in a `finally` block. dataKeyMightBeCreated.set(true); - BsonBinary dataKeyId = createDataKey(kmsProvider, dataKeyOptions); + BsonBinary dataKeyId = createDataKey(kmsProvider, dataKeyOptions, operationTimeout); field.put(keyIdBsonKey, dataKeyId); }); - database.createCollection(collectionName, + databaseWithTimeout(database, operationTimeout).createCollection(collectionName, new CreateCollectionOptions(createCollectionOptions).encryptedFields(maybeUpdatedEncryptedFields)); return maybeUpdatedEncryptedFields; } catch (Exception e) { @@ -236,7 +268,7 @@ public BsonDocument createEncryptedCollection(final MongoDatabase database, fina } } } else { - database.createCollection(collectionName, createCollectionOptions); + databaseWithTimeout(database, operationTimeout).createCollection(collectionName, createCollectionOptions); return encryptedFields; } } @@ -246,4 +278,9 @@ public void close() { crypt.close(); keyVaultClient.close(); } + + @Nullable + private Timeout startTimeout() { + return TimeoutContext.startTimeout(options.getTimeout(MILLISECONDS)); + } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java index a265ca01a7d..2d8a4dbfb30 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java @@ -18,11 +18,8 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; import com.mongodb.client.ClientSession; import com.mongodb.connection.ClusterType; -import com.mongodb.internal.connection.OperationContext; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.binding.AbstractReferenceCounted; import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; @@ -30,9 +27,8 @@ import com.mongodb.internal.binding.ReadWriteBinding; import com.mongodb.internal.binding.TransactionContext; import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.session.ClientSessionContext; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; import java.util.function.Supplier; @@ -48,14 +44,14 @@ public class ClientSessionBinding extends AbstractReferenceCounted implements Re private final ClusterAwareReadWriteBinding wrapped; private final ClientSession session; private final boolean ownsSession; - private final ClientSessionContext sessionContext; + private final OperationContext operationContext; public ClientSessionBinding(final ClientSession session, final boolean ownsSession, final ClusterAwareReadWriteBinding wrapped) { this.wrapped = wrapped; wrapped.retain(); this.session = notNull("session", session); this.ownsSession = ownsSession; - this.sessionContext = new SyncClientSessionContext(session); + this.operationContext = wrapped.getOperationContext().withSessionContext(new SyncClientSessionContext(session)); } @Override @@ -102,25 +98,9 @@ public ConnectionSource getWriteConnectionSource() { return new SessionBindingConnectionSource(getConnectionSource(wrapped::getWriteConnectionSource)); } - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - - @Override - @Nullable - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return 
wrapped.getRequestContext(); - } - @Override public OperationContext getOperationContext() { - return wrapped.getOperationContext(); + return operationContext; } private ConnectionSource getConnectionSource(final Supplier wrappedConnectionSourceSupplier) { @@ -155,24 +135,9 @@ public ServerDescription getServerDescription() { return wrapped.getServerDescription(); } - @Override - public SessionContext getSessionContext() { - return sessionContext; - } - @Override public OperationContext getOperationContext() { - return wrapped.getOperationContext(); - } - - @Override - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); + return operationContext; } @Override @@ -250,7 +215,7 @@ public ReadConcern getReadConcern() { } else if (isSnapshot()) { return ReadConcern.SNAPSHOT; } else { - return wrapped.getSessionContext().getReadConcern(); + return wrapped.getOperationContext().getSessionContext().getReadConcern(); } } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java index 4a6afe4101b..d3bbd850ae0 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java @@ -21,17 +21,21 @@ import com.mongodb.MongoException; import com.mongodb.MongoExecutionTimeoutException; import com.mongodb.MongoInternalException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.ReadConcern; import com.mongodb.TransactionOptions; import com.mongodb.WriteConcern; import com.mongodb.client.ClientSession; import com.mongodb.client.TransactionBody; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.operation.AbortTransactionOperation; import com.mongodb.internal.operation.CommitTransactionOperation; import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteConcernHelper; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.internal.session.BaseClientSessionImpl; import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; @@ -39,26 +43,21 @@ import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; final class ClientSessionImpl extends BaseClientSessionImpl implements ClientSession { - private enum TransactionState { - NONE, IN, COMMITTED, ABORTED - } - private static final int MAX_RETRY_TIME_LIMIT_MS = 120000; - private final MongoClientDelegate delegate; + private final OperationExecutor operationExecutor; private TransactionState transactionState = TransactionState.NONE; private boolean messageSentInCurrentTransaction; private boolean commitInProgress; private TransactionOptions transactionOptions; ClientSessionImpl(final ServerSessionPool serverSessionPool, final Object originator, final ClientSessionOptions options, - final MongoClientDelegate delegate) { + final OperationExecutor operationExecutor) { super(serverSessionPool, originator, options); - this.delegate = delegate; + this.operationExecutor = operationExecutor; } @Override @@ -104,6 
+103,47 @@ public void startTransaction() { @Override public void startTransaction(final TransactionOptions transactionOptions) { + startTransaction(transactionOptions, createTimeoutContext(transactionOptions)); + } + + @Override + public void commitTransaction() { + commitTransaction(true); + } + + @Override + public void abortTransaction() { + if (transactionState == TransactionState.ABORTED) { + throw new IllegalStateException("Cannot call abortTransaction twice"); + } + if (transactionState == TransactionState.COMMITTED) { + throw new IllegalStateException("Cannot call abortTransaction after calling commitTransaction"); + } + if (transactionState == TransactionState.NONE) { + throw new IllegalStateException("There is no transaction started"); + } + try { + if (messageSentInCurrentTransaction) { + ReadConcern readConcern = transactionOptions.getReadConcern(); + if (readConcern == null) { + throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); + } + resetTimeout(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + operationExecutor + .execute(new AbortTransactionOperation(writeConcern) + .recoveryToken(getRecoveryToken()), readConcern, this); + } + } catch (RuntimeException e) { + // ignore exceptions from abort + } finally { + clearTransactionContext(); + cleanupTransaction(TransactionState.ABORTED); + } + } + + private void startTransaction(final TransactionOptions transactionOptions, final TimeoutContext timeoutContext) { Boolean snapshot = getOptions().isSnapshot(); if (snapshot != null && snapshot) { throw new IllegalArgumentException("Transactions are not supported in snapshot sessions"); @@ -119,7 +159,7 @@ public void startTransaction(final TransactionOptions transactionOptions) { } getServerSession().advanceTransactionNumber(); this.transactionOptions = TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()); - WriteConcern writeConcern = this.transactionOptions.getWriteConcern(); + WriteConcern writeConcern = getWriteConcern(timeoutContext); if (writeConcern == null) { throw new MongoInternalException("Invariant violated. Transaction options write concern can not be null"); } @@ -127,10 +167,19 @@ public void startTransaction(final TransactionOptions transactionOptions) { throw new MongoClientException("Transactions do not support unacknowledged write concern"); } clearTransactionContext(); + setTimeoutContext(timeoutContext); } - @Override - public void commitTransaction() { + @Nullable + private WriteConcern getWriteConcern(@Nullable final TimeoutContext timeoutContext) { + WriteConcern writeConcern = transactionOptions.getWriteConcern(); + if (hasTimeoutMS(timeoutContext) && hasWTimeoutMS(writeConcern)) { + return WriteConcernHelper.cloneWithoutTimeout(writeConcern); + } + return writeConcern; + } + + private void commitTransaction(final boolean resetTimeout) { if (transactionState == TransactionState.ABORTED) { throw new IllegalStateException("Cannot call commitTransaction after calling abortTransaction"); } @@ -145,11 +194,15 @@ public void commitTransaction() { throw new MongoInternalException("Invariant violated. 
Transaction options read concern can not be null"); } commitInProgress = true; - delegate.getOperationExecutor().execute(new CommitTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()), - transactionState == TransactionState.COMMITTED) - .recoveryToken(getRecoveryToken()) - .maxCommitTime(transactionOptions.getMaxCommitTime(MILLISECONDS), MILLISECONDS), - readConcern, this); + if (resetTimeout) { + resetTimeout(); + } + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + operationExecutor + .execute(new CommitTransactionOperation(writeConcern, + transactionState == TransactionState.COMMITTED) + .recoveryToken(getRecoveryToken()), readConcern, this); } } catch (MongoException e) { clearTransactionContextOnError(e); @@ -160,35 +213,6 @@ public void commitTransaction() { } } - @Override - public void abortTransaction() { - if (transactionState == TransactionState.ABORTED) { - throw new IllegalStateException("Cannot call abortTransaction twice"); - } - if (transactionState == TransactionState.COMMITTED) { - throw new IllegalStateException("Cannot call abortTransaction after calling commitTransaction"); - } - if (transactionState == TransactionState.NONE) { - throw new IllegalStateException("There is no transaction started"); - } - try { - if (messageSentInCurrentTransaction) { - ReadConcern readConcern = transactionOptions.getReadConcern(); - if (readConcern == null) { - throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); - } - delegate.getOperationExecutor().execute(new AbortTransactionOperation(assertNotNull(transactionOptions.getWriteConcern())) - .recoveryToken(getRecoveryToken()), - readConcern, this); - } - } catch (RuntimeException e) { - // ignore exceptions from abort - } finally { - clearTransactionContext(); - cleanupTransaction(TransactionState.ABORTED); - } - } - private void clearTransactionContextOnError(final MongoException e) { if (e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) || e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { clearTransactionContext(); @@ -204,17 +228,19 @@ public T withTransaction(final TransactionBody transactionBody) { public T withTransaction(final TransactionBody transactionBody, final TransactionOptions options) { notNull("transactionBody", transactionBody); long startTime = ClientSessionClock.INSTANCE.now(); + TimeoutContext withTransactionTimeoutContext = createTimeoutContext(options); + outer: while (true) { T retVal; try { - startTransaction(options); + startTransaction(options, withTransactionTimeoutContext.copyTimeoutContext()); retVal = transactionBody.execute(); } catch (Throwable e) { if (transactionState == TransactionState.IN) { abortTransaction(); } - if (e instanceof MongoException) { + if (e instanceof MongoException && !(e instanceof MongoOperationTimeoutException)) { if (((MongoException) e).hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) && ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) { continue; @@ -225,11 +251,12 @@ public T withTransaction(final TransactionBody transactionBody, final Tra if (transactionState == TransactionState.IN) { while (true) { try { - commitTransaction(); + commitTransaction(false); break; } catch (MongoException e) { clearTransactionContextOnError(e); - if (ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) { + if (!(e instanceof MongoOperationTimeoutException) + && 
ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) { applyMajorityWriteConcernToTransactionOptions(); if (!(e instanceof MongoExecutionTimeoutException) @@ -247,10 +274,23 @@ public T withTransaction(final TransactionBody transactionBody, final Tra } } + @Override + public void close() { + try { + if (transactionState == TransactionState.IN) { + abortTransaction(); + } + } finally { + clearTransactionContext(); + super.close(); + } + } + // Apply majority write concern if the commit is to be retried. private void applyMajorityWriteConcernToTransactionOptions() { if (transactionOptions != null) { - WriteConcern writeConcern = transactionOptions.getWriteConcern(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = getWriteConcern(timeoutContext); if (writeConcern != null) { transactionOptions = TransactionOptions.merge(TransactionOptions.builder() .writeConcern(writeConcern.withW("majority")).build(), transactionOptions); @@ -263,21 +303,16 @@ private void applyMajorityWriteConcernToTransactionOptions() { } } - @Override - public void close() { - try { - if (transactionState == TransactionState.IN) { - abortTransaction(); - } - } finally { - clearTransactionContext(); - super.close(); - } - } - private void cleanupTransaction(final TransactionState nextState) { messageSentInCurrentTransaction = false; transactionOptions = null; transactionState = nextState; + setTimeoutContext(null); + } + + private TimeoutContext createTimeoutContext(final TransactionOptions transactionOptions) { + return new TimeoutContext(getTimeoutSettings( + TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()), + operationExecutor.getTimeoutSettings())); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java b/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java index 6098aef53b8..934a3dce486 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java +++ b/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java @@ -17,13 +17,16 @@ package com.mongodb.client.internal; import com.mongodb.client.MongoClient; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; class CollectionInfoRetriever { + private static final String TIMEOUT_ERROR_MESSAGE = "Collection information retrieval exceeded the timeout limit."; private final MongoClient client; CollectionInfoRetriever(final MongoClient client) { @@ -31,7 +34,8 @@ class CollectionInfoRetriever { } @Nullable - public BsonDocument filter(final String databaseName, final BsonDocument filter) { - return client.getDatabase(databaseName).listCollections(BsonDocument.class).filter(filter).first(); + public BsonDocument filter(final String databaseName, final BsonDocument filter, @Nullable final Timeout operationTimeout) { + return databaseWithTimeout(client.getDatabase(databaseName), TIMEOUT_ERROR_MESSAGE, + operationTimeout).listCollections(BsonDocument.class).filter(filter).first(); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java index 05cfc9462d6..9e2d7b3889b 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java +++ 
b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java @@ -19,12 +19,15 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.MongoClientException; import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoTimeoutException; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.client.MongoClient; import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoDatabase; import com.mongodb.crypt.capi.MongoCrypt; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.RawBsonDocument; @@ -32,6 +35,7 @@ import java.util.Map; import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; import static com.mongodb.internal.capi.MongoCryptHelper.createMongocryptdClientSettings; import static com.mongodb.internal.capi.MongoCryptHelper.createProcessBuilder; import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled; @@ -39,6 +43,7 @@ @SuppressWarnings("UseOfProcessBuilder") class CommandMarker implements Closeable { + private static final String TIMEOUT_ERROR_MESSAGE = "Command marker exceeded the timeout limit."; @Nullable private final MongoClient client; @Nullable @@ -58,7 +63,6 @@ class CommandMarker implements Closeable { *
     The extraOptions.cryptSharedLibRequired option is false.
     * Then mongocryptd MUST be spawned by the driver. - *

      */ CommandMarker( final MongoCrypt mongoCrypt, @@ -80,17 +84,19 @@ class CommandMarker implements Closeable { } } - RawBsonDocument mark(final String databaseName, final RawBsonDocument command) { + RawBsonDocument mark(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { if (client != null) { try { try { - return executeCommand(databaseName, command); + return executeCommand(databaseName, command, operationTimeout); + } catch (MongoOperationTimeoutException e){ + throw e; } catch (MongoTimeoutException e) { if (processBuilder == null) { // mongocryptdBypassSpawn=true throw e; } startProcess(processBuilder); - return executeCommand(databaseName, command); + return executeCommand(databaseName, command, operationTimeout); } } catch (MongoException e) { throw wrapInClientException(e); @@ -107,11 +113,14 @@ public void close() { } } - private RawBsonDocument executeCommand(final String databaseName, final RawBsonDocument markableCommand) { + private RawBsonDocument executeCommand(final String databaseName, final RawBsonDocument markableCommand, @Nullable final Timeout operationTimeout) { assertNotNull(client); - return client.getDatabase(databaseName) + + MongoDatabase mongoDatabase = client.getDatabase(databaseName) .withReadConcern(ReadConcern.DEFAULT) - .withReadPreference(ReadPreference.primary()) + .withReadPreference(ReadPreference.primary()); + + return databaseWithTimeout(mongoDatabase, TIMEOUT_ERROR_MESSAGE, operationTimeout) .runCommand(markableCommand, RawBsonDocument.class); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java index 792061d7748..53a65ceaa02 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java @@ -20,6 +20,7 @@ import com.mongodb.MongoException; import com.mongodb.MongoInternalException; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.MongoClient; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.model.vault.EncryptOptions; @@ -31,6 +32,7 @@ import com.mongodb.crypt.capi.MongoKeyDecryptor; import com.mongodb.crypt.capi.MongoRewrapManyDataKeyOptions; import com.mongodb.internal.capi.MongoCryptHelper; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -131,7 +133,7 @@ public class Crypt implements Closeable { * @param command the unencrypted command * @return the encrypted command */ - RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command) { + RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command, @Nullable final Timeout timeoutOperation) { notNull("databaseName", databaseName); notNull("command", command); @@ -140,7 +142,7 @@ RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command } try (MongoCryptContext encryptionContext = mongoCrypt.createEncryptionContext(databaseName, command)) { - return executeStateMachine(encryptionContext, databaseName); + return executeStateMachine(encryptionContext, databaseName, timeoutOperation); } catch (MongoCryptException e) { throw wrapInMongoException(e); } @@ -152,10 +154,10 @@ RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command * @param commandResponse the encrypted command response * @return the decrypted command response */ - 
RawBsonDocument decrypt(final RawBsonDocument commandResponse) { + RawBsonDocument decrypt(final RawBsonDocument commandResponse, @Nullable final Timeout timeoutOperation) { notNull("commandResponse", commandResponse); try (MongoCryptContext decryptionContext = mongoCrypt.createDecryptionContext(commandResponse)) { - return executeStateMachine(decryptionContext, null); + return executeStateMachine(decryptionContext, null, timeoutOperation); } catch (MongoCryptException e) { throw wrapInMongoException(e); } @@ -168,7 +170,7 @@ RawBsonDocument decrypt(final RawBsonDocument commandResponse) { * @param options the data key options * @return the document representing the data key to be added to the key vault */ - BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions options) { + BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions options, @Nullable final Timeout operationTimeout) { notNull("kmsProvider", kmsProvider); notNull("options", options); @@ -178,7 +180,7 @@ BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions option .masterKey(options.getMasterKey()) .keyMaterial(options.getKeyMaterial()) .build())) { - return executeStateMachine(dataKeyCreationContext, null); + return executeStateMachine(dataKeyCreationContext, null, operationTimeout); } catch (MongoCryptException e) { throw wrapInMongoException(e); } @@ -191,13 +193,13 @@ BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions option * @param options the options * @return the encrypted value */ - BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options) { + BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options, @Nullable final Timeout timeoutOperation) { notNull("value", value); notNull("options", options); try (MongoCryptContext encryptionContext = mongoCrypt.createExplicitEncryptionContext( new BsonDocument("v", value), asMongoExplicitEncryptOptions(options))) { - return executeStateMachine(encryptionContext, null).getBinary("v"); + return executeStateMachine(encryptionContext, null, timeoutOperation).getBinary("v"); } catch (MongoCryptException e) { throw wrapInMongoException(e); } @@ -210,14 +212,14 @@ BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options * @param options the options * @return the encrypted expression */ - @Beta(Beta.Reason.SERVER) - BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptions options) { + @Beta(Reason.SERVER) + BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout timeoutOperation) { notNull("expression", expression); notNull("options", options); try (MongoCryptContext encryptionContext = mongoCrypt.createEncryptExpressionContext( new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options))) { - return executeStateMachine(encryptionContext, null).getDocument("v"); + return executeStateMachine(encryptionContext, null, timeoutOperation).getDocument("v"); } catch (MongoCryptException e) { throw wrapInMongoException(e); } @@ -229,10 +231,10 @@ BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptio * @param value the encrypted value * @return the decrypted value */ - BsonValue decryptExplicitly(final BsonBinary value) { + BsonValue decryptExplicitly(final BsonBinary value, @Nullable final Timeout timeoutOperation) { notNull("value", value); try (MongoCryptContext decryptionContext = 
mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value))) { - return assertNotNull(executeStateMachine(decryptionContext, null).get("v")); + return assertNotNull(executeStateMachine(decryptionContext, null, timeoutOperation).get("v")); } catch (MongoCryptException e) { throw wrapInMongoException(e); } @@ -245,7 +247,7 @@ BsonValue decryptExplicitly(final BsonBinary value) { * @return the decrypted value * @since 4.7 */ - BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options) { + BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options, @Nullable final Timeout operationTimeout) { notNull("filter", filter); try { try (MongoCryptContext rewrapManyDatakeyContext = mongoCrypt.createRewrapManyDatakeyContext(filter, @@ -254,7 +256,7 @@ BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKe .provider(options.getProvider()) .masterKey(options.getMasterKey()) .build())) { - return executeStateMachine(rewrapManyDatakeyContext, null); + return executeStateMachine(rewrapManyDatakeyContext, null, operationTimeout); } } catch (MongoCryptException e) { throw wrapInMongoException(e); @@ -274,24 +276,24 @@ public void close() { } } - private RawBsonDocument executeStateMachine(final MongoCryptContext cryptContext, @Nullable final String databaseName) { + private RawBsonDocument executeStateMachine(final MongoCryptContext cryptContext, @Nullable final String databaseName, @Nullable final Timeout operationTimeout) { while (true) { State state = cryptContext.getState(); switch (state) { case NEED_MONGO_COLLINFO: - collInfo(cryptContext, notNull("databaseName", databaseName)); + collInfo(cryptContext, notNull("databaseName", databaseName), operationTimeout); break; case NEED_MONGO_MARKINGS: - mark(cryptContext, notNull("databaseName", databaseName)); + mark(cryptContext, notNull("databaseName", databaseName), operationTimeout); break; case NEED_KMS_CREDENTIALS: fetchCredentials(cryptContext); break; case NEED_MONGO_KEYS: - fetchKeys(cryptContext); + fetchKeys(cryptContext, operationTimeout); break; case NEED_KMS: - decryptKeys(cryptContext); + decryptKeys(cryptContext, operationTimeout); break; case READY: return cryptContext.finish(); @@ -307,9 +309,9 @@ private void fetchCredentials(final MongoCryptContext cryptContext) { cryptContext.provideKmsProviderCredentials(MongoCryptHelper.fetchCredentials(kmsProviders, kmsProviderPropertySuppliers)); } - private void collInfo(final MongoCryptContext cryptContext, final String databaseName) { + private void collInfo(final MongoCryptContext cryptContext, final String databaseName, @Nullable final Timeout operationTimeout) { try { - BsonDocument collectionInfo = assertNotNull(collectionInfoRetriever).filter(databaseName, cryptContext.getMongoOperation()); + BsonDocument collectionInfo = assertNotNull(collectionInfoRetriever).filter(databaseName, cryptContext.getMongoOperation(), operationTimeout); if (collectionInfo != null) { cryptContext.addMongoOperationResult(collectionInfo); } @@ -319,9 +321,9 @@ private void collInfo(final MongoCryptContext cryptContext, final String databas } } - private void mark(final MongoCryptContext cryptContext, final String databaseName) { + private void mark(final MongoCryptContext cryptContext, final String databaseName, @Nullable final Timeout operationTimeout) { try { - RawBsonDocument markedCommand = assertNotNull(commandMarker).mark(databaseName, cryptContext.getMongoOperation()); + RawBsonDocument 
markedCommand = assertNotNull(commandMarker).mark(databaseName, cryptContext.getMongoOperation(), operationTimeout); cryptContext.addMongoOperationResult(markedCommand); cryptContext.completeMongoOperation(); } catch (Throwable t) { @@ -329,9 +331,9 @@ private void mark(final MongoCryptContext cryptContext, final String databaseNam } } - private void fetchKeys(final MongoCryptContext keyBroker) { + private void fetchKeys(final MongoCryptContext keyBroker, @Nullable final Timeout operationTimeout) { try { - for (BsonDocument bsonDocument : keyRetriever.find(keyBroker.getMongoOperation())) { + for (BsonDocument bsonDocument : keyRetriever.find(keyBroker.getMongoOperation(), operationTimeout)) { keyBroker.addMongoOperationResult(bsonDocument); } keyBroker.completeMongoOperation(); @@ -340,11 +342,11 @@ private void fetchKeys(final MongoCryptContext keyBroker) { } } - private void decryptKeys(final MongoCryptContext cryptContext) { + private void decryptKeys(final MongoCryptContext cryptContext, @Nullable final Timeout operationTimeout) { try { MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor(); while (keyDecryptor != null) { - decryptKey(keyDecryptor); + decryptKey(keyDecryptor, operationTimeout); keyDecryptor = cryptContext.nextKeyDecryptor(); } cryptContext.completeKeyDecryptors(); @@ -354,9 +356,9 @@ private void decryptKeys(final MongoCryptContext cryptContext) { } } - private void decryptKey(final MongoKeyDecryptor keyDecryptor) throws IOException { + private void decryptKey(final MongoKeyDecryptor keyDecryptor, @Nullable final Timeout operationTimeout) throws IOException { try (InputStream inputStream = keyManagementService.stream(keyDecryptor.getKmsProvider(), keyDecryptor.getHostName(), - keyDecryptor.getMessage())) { + keyDecryptor.getMessage(), operationTimeout)) { int bytesNeeded = keyDecryptor.bytesNeeded(); while (bytesNeeded > 0) { diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java b/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java index ab195a46dd5..036466077ec 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java +++ b/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java @@ -17,17 +17,14 @@ package com.mongodb.client.internal; import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; import com.mongodb.ServerAddress; -import com.mongodb.ServerApi; import com.mongodb.connection.ServerDescription; import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.binding.ReadWriteBinding; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.session.SessionContext; -import com.mongodb.lang.Nullable; + class CryptBinding implements ClusterAwareReadWriteBinding { private final ClusterAwareReadWriteBinding wrapped; @@ -63,22 +60,6 @@ public ConnectionSource getConnectionSource(final ServerAddress serverAddress) { return new CryptConnectionSource(wrapped.getConnectionSource(serverAddress)); } - @Override - public SessionContext getSessionContext() { - return wrapped.getSessionContext(); - } - - @Override - @Nullable - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); - } - @Override public OperationContext getOperationContext() { return wrapped.getOperationContext(); @@ -112,26 +93,11 @@ 
public ServerDescription getServerDescription() { return wrapped.getServerDescription(); } - @Override - public SessionContext getSessionContext() { - return wrapped.getSessionContext(); - } - @Override public OperationContext getOperationContext() { return wrapped.getOperationContext(); } - @Override - public ServerApi getServerApi() { - return wrapped.getServerApi(); - } - - @Override - public RequestContext getRequestContext() { - return wrapped.getRequestContext(); - } - @Override public ReadPreference getReadPreference() { return wrapped.getReadPreference(); diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java index 18742d487f9..f47f6a810a6 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java +++ b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java @@ -19,11 +19,12 @@ import com.mongodb.MongoClientException; import com.mongodb.ReadPreference; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.binding.BindingContext; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.MessageSettings; +import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.SplittablePayload; import com.mongodb.internal.connection.SplittablePayloadBsonWriter; +import com.mongodb.internal.time.Timeout; import com.mongodb.internal.validator.MappedFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonBinaryReader; @@ -86,7 +87,7 @@ public ConnectionDescription getDescription() { @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final BindingContext context, final boolean responseExpected, + final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { if (serverIsLessThanVersionFourDotTwo(wrapped.getDescription())) { @@ -104,17 +105,18 @@ public T command(final String database, final BsonDocument command, final Fi getEncoder(command).encode(writer, command, EncoderContext.builder().build()); + Timeout operationTimeout = operationContext.getTimeoutContext().getTimeout(); RawBsonDocument encryptedCommand = crypt.encrypt(database, - new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize())); + new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout); RawBsonDocument encryptedResponse = wrapped.command(database, encryptedCommand, commandFieldNameValidator, readPreference, - new RawBsonDocumentCodec(), context, responseExpected, null, null); + new RawBsonDocumentCodec(), operationContext, responseExpected, null, null); if (encryptedResponse == null) { return null; } - RawBsonDocument decryptedResponse = crypt.decrypt(encryptedResponse); + RawBsonDocument decryptedResponse = crypt.decrypt(encryptedResponse, operationTimeout); BsonBinaryReader reader = new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO()); @@ -124,8 +126,8 @@ public T command(final String database, final BsonDocument command, final Fi @Nullable @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, - @Nullable final ReadPreference readPreference, final Decoder 
commandResultDecoder, final BindingContext context) { - return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, context, true, null, null); + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext) { + return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, null, null); } @SuppressWarnings("unchecked") diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java index 73e4d42e8ef..55274fcc786 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java @@ -35,11 +35,11 @@ */ public final class Crypts { - public static Crypt createCrypt(final MongoClientImpl client, final AutoEncryptionSettings settings) { + public static Crypt createCrypt(final MongoClientSettings mongoClientSettings, final AutoEncryptionSettings settings) { MongoClient sharedInternalClient = null; MongoClientSettings keyVaultMongoClientSettings = settings.getKeyVaultMongoClientSettings(); if (keyVaultMongoClientSettings == null || !settings.isBypassAutoEncryption()) { - MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(client.getSettings()) + MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(mongoClientSettings) .applyToConnectionPoolSettings(builder -> builder.minSize(0)) .autoEncryptionSettings(null) .build(); diff --git a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java index 3c4e1d18ea3..b37931c52cb 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java @@ -21,7 +21,9 @@ import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; import com.mongodb.client.DistinctIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.SyncOperations; @@ -46,19 +48,12 @@ class DistinctIterableImpl extends MongoIterableImpl documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter) { - this(clientSession, namespace, documentClass, resultClass, codecRegistry, readPreference, readConcern, executor, fieldName, - filter, true); - } - DistinctIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter, - final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); - this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads); + final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); + 
this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutSettings); this.resultClass = notNull("resultClass", resultClass); this.fieldName = notNull("mapFunction", fieldName); this.filter = filter; @@ -83,6 +78,12 @@ public DistinctIterable batchSize(final int batchSize) { return this; } + @Override + public DistinctIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public DistinctIterable collation(@Nullable final Collation collation) { this.collation = collation; @@ -103,6 +104,11 @@ public DistinctIterable comment(@Nullable final BsonValue comment) { @Override public ReadOperation> asReadOperation() { - return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment); + return operations.distinct(fieldName, filter, resultClass, collation, comment); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java index de0fdc94f3e..fbead0d7911 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java @@ -23,7 +23,9 @@ import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; import com.mongodb.client.FindIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ExplainableReadOperation; @@ -49,16 +51,11 @@ class FindIterableImpl extends MongoIterableImpl im private Bson filter; FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final OperationExecutor executor, final Bson filter) { - this(clientSession, namespace, documentClass, resultClass, codecRegistry, readPreference, readConcern, executor, filter, true); - } - - FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); - this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads); + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads, + final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); + this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutSettings); this.resultClass = notNull("resultClass", resultClass); this.filter = notNull("filter", filter); this.findOptions = new FindOptions(); @@ -92,7 +89,7 @@ public FindIterable maxTime(final long 
maxTime, final TimeUnit timeUnit @Override public FindIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); + validateMaxAwaitTime(maxAwaitTime, timeUnit); findOptions.maxAwaitTime(maxAwaitTime, timeUnit); return this; } @@ -104,6 +101,13 @@ public FindIterable batchSize(final int batchSize) { return this; } + @Override + public FindIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + findOptions.timeoutMode(timeoutMode); + return this; + } + @Override public FindIterable collation(@Nullable final Collation collation) { findOptions.collation(collation); @@ -203,8 +207,8 @@ public FindIterable allowDiskUse(@Nullable final Boolean allowDiskUse) @Nullable @Override public TResult first() { - try (BatchCursor batchCursor = getExecutor().execute(operations.findFirst(filter, resultClass, findOptions), - getReadPreference(), getReadConcern(), getClientSession())) { + try (BatchCursor batchCursor = getExecutor().execute( + operations.findFirst(filter, resultClass, findOptions), getReadPreference(), getReadConcern(), getClientSession())) { return batchCursor.hasNext() ? batchCursor.next().iterator().next() : null; } } @@ -229,10 +233,15 @@ public E explain(final Class explainResultClass, final ExplainVerbosity v return executeExplain(explainResultClass, notNull("verbosity", verbosity)); } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(findOptions)); + } + private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getExecutor().execute(asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), - getReadPreference(), getReadConcern(), getClientSession()); + return getExecutor().execute( + asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession()); } public ExplainableReadOperation> asReadOperation() { diff --git a/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java b/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java index 7ae6f106ed5..fee5ddac729 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java +++ b/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java @@ -17,9 +17,13 @@ package com.mongodb.client.internal; import com.mongodb.ServerAddress; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.connection.SslHelper; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; -import com.mongodb.internal.connection.SslHelper; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.jetbrains.annotations.NotNull; import javax.net.SocketFactory; import javax.net.ssl.SSLContext; @@ -32,10 +36,14 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; +import java.net.SocketException; import java.nio.ByteBuffer; import java.util.Map; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; class KeyManagementService { private static final Logger LOGGER = Loggers.getLogger("client"); @@ -47,7 +55,7 @@ class 
KeyManagementService { this.timeoutMillis = timeoutMillis; } - public InputStream stream(final String kmsProvider, final String host, final ByteBuffer message) throws IOException { + public InputStream stream(final String kmsProvider, final String host, final ByteBuffer message, @Nullable final Timeout operationTimeout) throws IOException { ServerAddress serverAddress = new ServerAddress(host); LOGGER.info("Connecting to KMS server at " + serverAddress); @@ -79,7 +87,7 @@ public InputStream stream(final String kmsProvider, final String host, final Byt } try { - return socket.getInputStream(); + return OperationTimeoutAwareInputStream.wrapIfNeeded(operationTimeout, socket); } catch (IOException e) { closeSocket(socket); throw e; @@ -102,4 +110,85 @@ private void closeSocket(final Socket socket) { // ignore } } + + private static final class OperationTimeoutAwareInputStream extends InputStream { + private final Socket socket; + private final Timeout operationTimeout; + private final InputStream wrapped; + + /** + * @param socket - socket to set timeout on. + * @param operationTimeout - non-infinite timeout. + */ + private OperationTimeoutAwareInputStream(final Socket socket, final Timeout operationTimeout) throws IOException { + this.socket = socket; + this.operationTimeout = operationTimeout; + this.wrapped = socket.getInputStream(); + } + + public static InputStream wrapIfNeeded(@Nullable final Timeout operationTimeout, final SSLSocket socket) throws IOException { + return Timeout.nullAsInfinite(operationTimeout).checkedCall(NANOSECONDS, + () -> socket.getInputStream(), + (ns) -> new OperationTimeoutAwareInputStream(socket, assertNotNull(operationTimeout)), + () -> new OperationTimeoutAwareInputStream(socket, assertNotNull(operationTimeout))); + } + + private void setSocketSoTimeoutToOperationTimeout() throws SocketException { + operationTimeout.checkedRun(MILLISECONDS, + () -> { + throw new AssertionError("operationTimeout cannot be infinite"); + }, + (ms) -> socket.setSoTimeout(Math.toIntExact(ms)), + () -> TimeoutContext.throwMongoTimeoutException("Reading from KMS server exceeded the timeout limit.")); + } + + @Override + public int read() throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.read(); + } + + @Override + public int read(@NotNull final byte[] b) throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.read(b); + } + + @Override + public int read(@NotNull final byte[] b, final int off, final int len) throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.read(b, off, len); + } + + @Override + public void close() throws IOException { + wrapped.close(); + } + + @Override + public long skip(final long n) throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.skip(n); + } + + @Override + public int available() throws IOException { + return wrapped.available(); + } + + @Override + public synchronized void mark(final int readlimit) { + wrapped.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + wrapped.reset(); + } + + @Override + public boolean markSupported() { + return wrapped.markSupported(); + } + } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java b/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java index 14906349404..59544eefc45 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java +++ b/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java @@ 
-19,14 +19,19 @@ import com.mongodb.MongoNamespace; import com.mongodb.ReadConcern; import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCollection; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import java.util.ArrayList; import java.util.List; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout; class KeyRetriever { + private static final String TIMEOUT_ERROR_MESSAGE = "Key retrieval exceeded the timeout limit."; private final MongoClient client; private final MongoNamespace namespace; @@ -35,8 +40,11 @@ class KeyRetriever { this.namespace = notNull("namespace", namespace); } - public List find(final BsonDocument keyFilter) { - return client.getDatabase(namespace.getDatabaseName()).getCollection(namespace.getCollectionName(), BsonDocument.class) + public List find(final BsonDocument keyFilter, @Nullable final Timeout operationTimeout) { + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class); + + return collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, operationTimeout) .withReadConcern(ReadConcern.MAJORITY) .find(keyFilter).into(new ArrayList<>()); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java index e6da2f332c1..7d617947077 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java @@ -21,6 +21,8 @@ import com.mongodb.client.ClientSession; import com.mongodb.client.ListCollectionNamesIterable; import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.SyncOperations; @@ -40,7 +42,6 @@ class ListCollectionsIterableImpl extends MongoIterableImpl im private final SyncOperations operations; private final String databaseName; private final Class resultClass; - private Bson filter; private final boolean collectionNamesOnly; private boolean authorizedCollections; @@ -49,10 +50,10 @@ class ListCollectionsIterableImpl extends MongoIterableImpl im ListCollectionsIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final boolean collectionNamesOnly, final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor, final boolean retryReads) { - super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); // TODO: read concern? + final OperationExecutor executor, final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); // TODO: read concern? 
this.collectionNamesOnly = collectionNamesOnly; - this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads); + this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); this.databaseName = notNull("databaseName", databaseName); this.resultClass = notNull("resultClass", resultClass); } @@ -76,6 +77,12 @@ public ListCollectionsIterable batchSize(final int batchSize) { return this; } + @Override + public ListCollectionsIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public ListCollectionsIterable comment(@Nullable final String comment) { this.comment = comment != null ? new BsonString(comment) : null; @@ -99,6 +106,11 @@ ListCollectionsIterableImpl authorizedCollections(final boolean authori @Override public ReadOperation> asReadOperation() { return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, - getBatchSize(), maxTimeMS, comment); + getBatchSize(), comment, getTimeoutMode()); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java index 50c4eb14b4a..83bc08b3dd1 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java @@ -19,6 +19,8 @@ import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.SyncOperations; @@ -48,17 +50,11 @@ public class ListDatabasesIterableImpl extends MongoIterableImpl resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor) { - this(clientSession, resultClass, codecRegistry, readPreference, executor, true); - } - public ListDatabasesIterableImpl(@Nullable final ClientSession clientSession, final Class resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor, final boolean retryReads) { - super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); // TODO: read concern? - this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads); + final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); // TODO: read concern? 
+ this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); this.resultClass = notNull("clazz", resultClass); } @@ -75,6 +71,12 @@ public ListDatabasesIterable batchSize(final int batchSize) { return this; } + @Override + public ListDatabasesIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public ListDatabasesIterable filter(@Nullable final Bson filter) { this.filter = filter; @@ -107,6 +109,11 @@ public ListDatabasesIterable comment(@Nullable final BsonValue comment) @Override public ReadOperation> asReadOperation() { - return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabasesOnly, comment); + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabasesOnly, comment); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java index c2a9d528007..19be1bdc8ed 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java @@ -21,6 +21,8 @@ import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.SyncOperations; @@ -42,15 +44,10 @@ class ListIndexesIterableImpl extends MongoIterableImpl implem private BsonValue comment; ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor) { - this(clientSession, namespace, resultClass, codecRegistry, readPreference, executor, true); - } - - ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, - final boolean retryReads) { - super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); - this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads); + final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); + this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); this.resultClass = notNull("resultClass", resultClass); } @@ -67,6 +64,12 @@ public ListIndexesIterable batchSize(final int batchSize) { return this; } + @Override + public ListIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public ListIndexesIterable comment(@Nullable final String comment) { this.comment = comment != null ? 
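The list iterables above all follow the same shape: maxTimeMS is no longer handed to the read operation but folded into the executor's TimeoutSettings, and a timeoutMode override is exposed for cursor behaviour. A usage sketch under the assumed client-side timeout surface (the timeoutMS connection option and the TimeoutMode value are not shown in this patch and are assumptions here):

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.cursor.TimeoutMode;
import org.bson.Document;

public final class ListDatabasesTimeoutSketch {
    public static void main(final String[] args) {
        // timeoutMS bounds the whole operation; timeoutMode selects how the remaining
        // budget is applied across the initial command and any getMore batches.
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017/?timeoutMS=2000")) {
            for (Document db : client.listDatabases().timeoutMode(TimeoutMode.CURSOR_LIFETIME)) {
                System.out.println(db.getString("name"));
            }
        }
    }
}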
new BsonString(comment) : null; @@ -81,6 +84,10 @@ public ListIndexesIterable comment(@Nullable final BsonValue comment) { @Override public ReadOperation> asReadOperation() { - return operations.listIndexes(resultClass, getBatchSize(), maxTimeMS, comment); + return operations.listIndexes(resultClass, getBatchSize(), comment, getTimeoutMode()); + } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java index 0ffc9cea7a5..c67106d357d 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java @@ -21,7 +21,9 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ExplainableReadOperation; import com.mongodb.internal.operation.ReadOperation; @@ -54,11 +56,10 @@ final class ListSearchIndexesIterableImpl extends MongoIterableImpl resultClass, final CodecRegistry codecRegistry, - final ReadPreference readPreference, final boolean retryReads) { - super(null, executor, ReadConcern.DEFAULT, readPreference, retryReads); - + final ReadPreference readPreference, final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(null, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); this.resultClass = resultClass; - this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads); + this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); this.codecRegistry = codecRegistry; } @@ -67,7 +68,6 @@ public ReadOperation> asReadOperation() { return asAggregateOperation(); } - @Override public ListSearchIndexesIterable allowDiskUse(@Nullable final Boolean allowDiskUse) { this.allowDiskUse = allowDiskUse; @@ -80,6 +80,12 @@ public ListSearchIndexesIterable batchSize(final int batchSize) { return this; } + @Override + public ListSearchIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public ListSearchIndexesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { notNull("timeUnit", timeUnit); @@ -136,12 +142,18 @@ public E explain(final Class explainResultClass, final ExplainVerbosity v } private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { - return getExecutor().execute(asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), - getReadPreference(), getReadConcern(), getClientSession()); + return getExecutor().execute(asAggregateOperation() + .asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession()); } private ExplainableReadOperation> asAggregateOperation() { - return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, getBatchSize(), collation, comment, + return operations.listSearchIndexes(resultClass, indexName, getBatchSize(), collation, 
comment, allowDiskUse); } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } + } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java index 9c531f45d58..8a0107aafeb 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java @@ -21,7 +21,9 @@ import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; import com.mongodb.client.ClientSession; +import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.BatchCursor; @@ -41,8 +43,7 @@ import static com.mongodb.assertions.Assertions.notNull; @SuppressWarnings("deprecation") -class MapReduceIterableImpl extends MongoIterableImpl - implements com.mongodb.client.MapReduceIterable { +class MapReduceIterableImpl extends MongoIterableImpl implements com.mongodb.client.MapReduceIterable { private final SyncOperations operations; private final MongoNamespace namespace; private final Class resultClass; @@ -67,10 +68,10 @@ class MapReduceIterableImpl extends MongoIterableImpl documentClass, final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final String mapFunction, final String reduceFunction) { - super(clientSession, executor, readConcern, readPreference, false); + final String mapFunction, final String reduceFunction, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, false, timeoutSettings); this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - false, false); + false, false, timeoutSettings); this.namespace = notNull("namespace", namespace); this.resultClass = notNull("resultClass", resultClass); this.mapFunction = notNull("mapFunction", mapFunction); @@ -160,6 +161,12 @@ public com.mongodb.client.MapReduceIterable batchSize(final int batchSi return this; } + @Override + public com.mongodb.client.MapReduceIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + @Override public com.mongodb.client.MapReduceIterable bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { this.bypassDocumentValidation = bypassDocumentValidation; @@ -181,11 +188,16 @@ ReadPreference getReadPreference() { } } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } + @Override public ReadOperation> asReadOperation() { if (inline) { ReadOperation> operation = operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, - resultClass, filter, limit, maxTimeMS, jsMode, scope, sort, verbose, collation); + resultClass, filter, limit, jsMode, scope, sort, verbose, collation); return new WrappedMapReduceReadOperation<>(operation); } else { getExecutor().execute(createMapReduceToCollectionOperation(), getReadConcern(), getClientSession()); @@ -204,7 +216,7 @@ public ReadOperation> asReadOperation() { private WriteOperation createMapReduceToCollectionOperation() { 
return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, - limit, maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation + limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation ); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java deleted file mode 100644 index 8703fc8ce2d..00000000000 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.client.internal; - -import com.mongodb.ClientSessionOptions; -import com.mongodb.MongoClientException; -import com.mongodb.MongoException; -import com.mongodb.MongoInternalException; -import com.mongodb.MongoQueryException; -import com.mongodb.MongoSocketException; -import com.mongodb.MongoTimeoutException; -import com.mongodb.ReadConcern; -import com.mongodb.ReadPreference; -import com.mongodb.RequestContext; -import com.mongodb.ServerApi; -import com.mongodb.TransactionOptions; -import com.mongodb.WriteConcern; -import com.mongodb.client.ClientSession; -import com.mongodb.client.SynchronousContextProvider; -import com.mongodb.internal.IgnorableRequestContext; -import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; -import com.mongodb.internal.binding.ClusterBinding; -import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.binding.ReadWriteBinding; -import com.mongodb.internal.binding.WriteBinding; -import com.mongodb.internal.connection.Cluster; -import com.mongodb.internal.operation.ReadOperation; -import com.mongodb.internal.operation.WriteOperation; -import com.mongodb.internal.session.ServerSessionPool; -import com.mongodb.lang.Nullable; -import org.bson.codecs.configuration.CodecRegistry; - -import java.util.concurrent.atomic.AtomicBoolean; - -import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; -import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; -import static com.mongodb.ReadPreference.primary; -import static com.mongodb.assertions.Assertions.isTrue; -import static com.mongodb.assertions.Assertions.notNull; - -final class MongoClientDelegate { - private final Cluster cluster; - private final ServerSessionPool serverSessionPool; - private final Object originator; - private final OperationExecutor operationExecutor; - private final Crypt crypt; - @Nullable - private final ServerApi serverApi; - private final CodecRegistry codecRegistry; - @Nullable - private final SynchronousContextProvider contextProvider; - private final AtomicBoolean closed; - - MongoClientDelegate(final Cluster cluster, final CodecRegistry codecRegistry, - final Object originator, @Nullable final OperationExecutor operationExecutor, - @Nullable final Crypt crypt, @Nullable 
final ServerApi serverApi, - @Nullable final SynchronousContextProvider contextProvider) { - this.cluster = cluster; - this.codecRegistry = codecRegistry; - this.contextProvider = contextProvider; - this.serverSessionPool = new ServerSessionPool(cluster, serverApi); - this.originator = originator; - this.operationExecutor = operationExecutor == null ? new DelegateOperationExecutor() : operationExecutor; - this.crypt = crypt; - this.serverApi = serverApi; - this.closed = new AtomicBoolean(); - } - - public OperationExecutor getOperationExecutor() { - return operationExecutor; - } - - public ClientSession createClientSession(final ClientSessionOptions options, final ReadConcern readConcern, - final WriteConcern writeConcern, final ReadPreference readPreference) { - notNull("readConcern", readConcern); - notNull("writeConcern", writeConcern); - notNull("readPreference", readPreference); - - ClientSessionOptions mergedOptions = ClientSessionOptions.builder(options) - .defaultTransactionOptions( - TransactionOptions.merge( - options.getDefaultTransactionOptions(), - TransactionOptions.builder() - .readConcern(readConcern) - .writeConcern(writeConcern) - .readPreference(readPreference) - .build())) - .build(); - return new ClientSessionImpl(serverSessionPool, originator, mergedOptions, this); - } - - public void close() { - if (!closed.getAndSet(true)) { - if (crypt != null) { - crypt.close(); - } - serverSessionPool.close(); - cluster.close(); - } - } - - public Cluster getCluster() { - return cluster; - } - - public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - public ServerSessionPool getServerSessionPool() { - return serverSessionPool; - } - - private class DelegateOperationExecutor implements OperationExecutor { - @Override - public T execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern) { - return execute(operation, readPreference, readConcern, null); - } - - @Override - public T execute(final WriteOperation operation, final ReadConcern readConcern) { - return execute(operation, readConcern, null); - } - - @Override - public T execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern, - @Nullable final ClientSession session) { - if (session != null) { - session.notifyOperationInitiated(operation); - } - - ClientSession actualClientSession = getClientSession(session); - ReadBinding binding = getReadBinding(readPreference, readConcern, actualClientSession, session == null); - - try { - if (actualClientSession.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) { - throw new MongoClientException("Read preference in a transaction must be primary"); - } - return operation.execute(binding); - } catch (MongoException e) { - labelException(actualClientSession, e); - clearTransactionContextOnTransientTransactionError(session, e); - throw e; - } finally { - binding.release(); - } - } - - @Override - public T execute(final WriteOperation operation, final ReadConcern readConcern, @Nullable final ClientSession session) { - if (session != null) { - session.notifyOperationInitiated(operation); - } - - ClientSession actualClientSession = getClientSession(session); - WriteBinding binding = getWriteBinding(readConcern, actualClientSession, session == null); - - try { - return operation.execute(binding); - } catch (MongoException e) { - labelException(actualClientSession, e); - clearTransactionContextOnTransientTransactionError(session, e); - throw e; - } finally { - 
binding.release(); - } - } - - WriteBinding getWriteBinding(final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) { - return getReadWriteBinding(primary(), readConcern, session, ownsSession); - } - - ReadBinding getReadBinding(final ReadPreference readPreference, final ReadConcern readConcern, - final ClientSession session, final boolean ownsSession) { - return getReadWriteBinding(readPreference, readConcern, session, ownsSession); - } - - ReadWriteBinding getReadWriteBinding(final ReadPreference readPreference, final ReadConcern readConcern, - final ClientSession session, final boolean ownsSession) { - ClusterAwareReadWriteBinding readWriteBinding = new ClusterBinding(cluster, - getReadPreferenceForBinding(readPreference, session), readConcern, serverApi, getContext()); - - if (crypt != null) { - readWriteBinding = new CryptBinding(readWriteBinding, crypt); - } - - return new ClientSessionBinding(session, ownsSession, readWriteBinding); - } - - private RequestContext getContext() { - RequestContext context = null; - if (contextProvider != null) { - context = contextProvider.getContext(); - } - return context == null ? IgnorableRequestContext.INSTANCE : context; - } - - private void labelException(final ClientSession session, final MongoException e) { - if (session.hasActiveTransaction() && (e instanceof MongoSocketException || e instanceof MongoTimeoutException - || e instanceof MongoQueryException && e.getCode() == 91) - && !e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { - e.addLabel(TRANSIENT_TRANSACTION_ERROR_LABEL); - } - } - - private void clearTransactionContextOnTransientTransactionError(@Nullable final ClientSession session, final MongoException e) { - if (session != null && e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) { - session.clearTransactionContext(); - } - } - - private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) { - if (session == null) { - return readPreference; - } - if (session.hasActiveTransaction()) { - ReadPreference readPreferenceForBinding = session.getTransactionOptions().getReadPreference(); - if (readPreferenceForBinding == null) { - throw new MongoInternalException("Invariant violated. 
Transaction options read preference can not be null"); - } - return readPreferenceForBinding; - } - return readPreference; - } - - ClientSession getClientSession(@Nullable final ClientSession clientSessionFromOperation) { - ClientSession session; - if (clientSessionFromOperation != null) { - isTrue("ClientSession from same MongoClient", clientSessionFromOperation.getOriginator() == originator); - session = clientSessionFromOperation; - } else { - session = createClientSession(ClientSessionOptions.builder().causallyConsistent(false).build(), ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED, primary()); - } - return session; - } - } -} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java index 0a560442639..473d8ec4e8e 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java @@ -20,19 +20,21 @@ import com.mongodb.ClientSessionOptions; import com.mongodb.MongoClientSettings; import com.mongodb.MongoDriverInformation; +import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; -import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; import com.mongodb.client.ChangeStreamIterable; import com.mongodb.client.ClientSession; import com.mongodb.client.ListDatabasesIterable; import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; import com.mongodb.client.SynchronousContextProvider; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.SocketSettings; import com.mongodb.connection.TransportSettings; -import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.DefaultClusterFactory; import com.mongodb.internal.connection.InternalConnectionPoolSettings; @@ -48,8 +50,9 @@ import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; -import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.client.internal.Crypts.createCrypt; @@ -68,7 +71,8 @@ public final class MongoClientImpl implements MongoClient { private final MongoClientSettings settings; private final MongoDriverInformation mongoDriverInformation; - private final MongoClientDelegate delegate; + private final MongoClusterImpl delegate; + private final AtomicBoolean closed; public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation) { this(createCluster(settings, mongoDriverInformation), mongoDriverInformation, settings, null); @@ -84,136 +88,172 @@ public MongoClientImpl(final Cluster cluster, final MongoDriverInformation mongo throw new IllegalArgumentException("The contextProvider must be an instance of " + SynchronousContextProvider.class.getName() + " when using the synchronous driver"); } - this.delegate = new MongoClientDelegate(notNull("cluster", cluster), - withUuidRepresentation(settings.getCodecRegistry(), settings.getUuidRepresentation()), this, operationExecutor, - autoEncryptionSettings == null ? 
null : createCrypt(this, autoEncryptionSettings), settings.getServerApi(), - (SynchronousContextProvider) settings.getContextProvider()); + + this.delegate = new MongoClusterImpl(autoEncryptionSettings, cluster, + withUuidRepresentation(settings.getCodecRegistry(), settings.getUuidRepresentation()), + (SynchronousContextProvider) settings.getContextProvider(), + autoEncryptionSettings == null ? null : createCrypt(settings, autoEncryptionSettings), this, + operationExecutor, settings.getReadConcern(), settings.getReadPreference(), settings.getRetryReads(), + settings.getRetryWrites(), settings.getServerApi(), + new ServerSessionPool(cluster, TimeoutSettings.create(settings), settings.getServerApi()), + TimeoutSettings.create(settings), settings.getUuidRepresentation(), settings.getWriteConcern()); + this.closed = new AtomicBoolean(); BsonDocument clientMetadataDocument = createClientMetadataDocument(settings.getApplicationName(), mongoDriverInformation); + LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings)); } @Override - public MongoDatabase getDatabase(final String databaseName) { - return new MongoDatabaseImpl(databaseName, delegate.getCodecRegistry(), settings.getReadPreference(), settings.getWriteConcern(), - settings.getRetryWrites(), settings.getRetryReads(), settings.getReadConcern(), - settings.getUuidRepresentation(), settings.getAutoEncryptionSettings(), delegate.getOperationExecutor()); + public void close() { + if (!closed.getAndSet(true)) { + Crypt crypt = delegate.getCrypt(); + if (crypt != null) { + crypt.close(); + } + delegate.getServerSessionPool().close(); + delegate.getCluster().close(); + } } @Override - public MongoIterable listDatabaseNames() { - return createListDatabaseNamesIterable(null); + public ClusterDescription getClusterDescription() { + return delegate.getCluster().getCurrentDescription(); } @Override - public MongoIterable listDatabaseNames(final ClientSession clientSession) { - notNull("clientSession", clientSession); - return createListDatabaseNamesIterable(clientSession); + public CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); } @Override - public ListDatabasesIterable listDatabases() { - return listDatabases(Document.class); + public ReadPreference getReadPreference() { + return delegate.getReadPreference(); } @Override - public ListDatabasesIterable listDatabases(final Class clazz) { - return createListDatabasesIterable(null, clazz); + public WriteConcern getWriteConcern() { + return delegate.getWriteConcern(); } @Override - public ListDatabasesIterable listDatabases(final ClientSession clientSession) { - return listDatabases(clientSession, Document.class); + public ReadConcern getReadConcern() { + return delegate.getReadConcern(); } @Override - public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class clazz) { - notNull("clientSession", clientSession); - return createListDatabasesIterable(clientSession, clazz); + public Long getTimeout(final TimeUnit timeUnit) { + return delegate.getTimeout(timeUnit); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return delegate.withCodecRegistry(codecRegistry); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return delegate.withReadPreference(readPreference); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return 
delegate.withWriteConcern(writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return delegate.withReadConcern(readConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return delegate.withTimeout(timeout, timeUnit); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return delegate.getDatabase(databaseName); } @Override public ClientSession startSession() { - return startSession(ClientSessionOptions - .builder() - .defaultTransactionOptions(TransactionOptions.builder() - .readConcern(settings.getReadConcern()) - .writeConcern(settings.getWriteConcern()) - .build()) - .build()); + return delegate.startSession(); } @Override public ClientSession startSession(final ClientSessionOptions options) { - return delegate.createClientSession(notNull("options", options), - settings.getReadConcern(), settings.getWriteConcern(), settings.getReadPreference()); + return delegate.startSession(options); } @Override - public void close() { - delegate.close(); + public MongoIterable listDatabaseNames() { + return delegate.listDatabaseNames(); } @Override - public ChangeStreamIterable watch() { - return watch(Collections.emptyList()); + public MongoIterable listDatabaseNames(final ClientSession clientSession) { + return delegate.listDatabaseNames(clientSession); } @Override - public ChangeStreamIterable watch(final Class resultClass) { - return watch(Collections.emptyList(), resultClass); + public ListDatabasesIterable listDatabases() { + return delegate.listDatabases(); } @Override - public ChangeStreamIterable watch(final List pipeline) { - return watch(pipeline, Document.class); + public ListDatabasesIterable listDatabases(final ClientSession clientSession) { + return delegate.listDatabases(clientSession); } @Override - public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { - return createChangeStreamIterable(null, pipeline, resultClass); + public ListDatabasesIterable listDatabases(final Class resultClass) { + return delegate.listDatabases(resultClass); } @Override - public ChangeStreamIterable watch(final ClientSession clientSession) { - return watch(clientSession, Collections.emptyList(), Document.class); + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class resultClass) { + return delegate.listDatabases(clientSession, resultClass); } @Override - public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { - return watch(clientSession, Collections.emptyList(), resultClass); + public ChangeStreamIterable watch() { + return delegate.watch(); } @Override - public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { - return watch(clientSession, pipeline, Document.class); + public ChangeStreamIterable watch(final Class resultClass) { + return delegate.watch(resultClass); } @Override - public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, - final Class resultClass) { - notNull("clientSession", clientSession); - return createChangeStreamIterable(clientSession, pipeline, resultClass); + public ChangeStreamIterable watch(final List pipeline) { + return delegate.watch(pipeline); } @Override - public ClusterDescription getClusterDescription() { - return delegate.getCluster().getCurrentDescription(); + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return 
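With MongoClientImpl delegating to MongoClusterImpl, timeout and read/write settings live on immutable cluster views rather than on the client itself. A small usage sketch of the new surface (behaviour inferred from the delegate methods above):

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCluster;
import com.mongodb.client.MongoDatabase;

import java.util.concurrent.TimeUnit;

public final class ClusterViewSketch {
    public static void main(final String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            // Deriving a view does not mutate the original client.
            MongoCluster bounded = client.withTimeout(250, TimeUnit.MILLISECONDS);
            Long boundedMs = bounded.getTimeout(TimeUnit.MILLISECONDS);  // 250
            Long clientMs = client.getTimeout(TimeUnit.MILLISECONDS);    // null when timeoutMS is not set
            MongoDatabase db = bounded.getDatabase("test");              // inherits the 250 ms budget
            System.out.println(boundedMs + " / " + clientMs + " / " + db.getName());
        }
    }
}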
delegate.watch(pipeline, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return delegate.watch(clientSession); } - private ChangeStreamIterable createChangeStreamIterable(@Nullable final ClientSession clientSession, - final List pipeline, - final Class resultClass) { - return new ChangeStreamIterableImpl<>(clientSession, "admin", delegate.getCodecRegistry(), settings.getReadPreference(), - settings.getReadConcern(), delegate.getOperationExecutor(), - pipeline, resultClass, ChangeStreamLevel.CLIENT, settings.getRetryReads()); + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return delegate.watch(clientSession, resultClass); } - public Cluster getCluster() { - return delegate.getCluster(); + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return delegate.watch(clientSession, pipeline); } - public CodecRegistry getCodecRegistry() { - return delegate.getCodecRegistry(); + @Override + public ChangeStreamIterable watch( + final ClientSession clientSession, final List pipeline, final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); } private static Cluster createCluster(final MongoClientSettings settings, @@ -221,7 +261,8 @@ private static Cluster createCluster(final MongoClientSettings settings, notNull("settings", settings); return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(), settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().build(), - getStreamFactory(settings, false), getStreamFactory(settings, true), + TimeoutSettings.create(settings), getStreamFactory(settings, false), + TimeoutSettings.createHeartbeatSettings(settings), getStreamFactory(settings, true), settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()), settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(), settings.getDnsClient()); @@ -239,13 +280,8 @@ private static StreamFactory getStreamFactory(final MongoClientSettings settings } } - private ListDatabasesIterable createListDatabasesIterable(@Nullable final ClientSession clientSession, final Class clazz) { - return new ListDatabasesIterableImpl<>(clientSession, clazz, delegate.getCodecRegistry(), ReadPreference.primary(), - delegate.getOperationExecutor(), settings.getRetryReads()); - } - - private MongoIterable createListDatabaseNamesIterable(@Nullable final ClientSession clientSession) { - return createListDatabasesIterable(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + public Cluster getCluster() { + return delegate.getCluster(); } public ServerSessionPool getServerSessionPool() { @@ -256,6 +292,10 @@ public OperationExecutor getOperationExecutor() { return delegate.getOperationExecutor(); } + public TimeoutSettings getTimeoutSettings() { + return delegate.getTimeoutSettings(); + } + public MongoClientSettings getSettings() { return settings; } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java new file mode 100644 index 00000000000..b3d03095070 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java @@ -0,0 +1,486 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoQueryException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.RequestContext; +import com.mongodb.ServerApi; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.SynchronousContextProvider; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.ClusterBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; +import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.createTimeoutContext; + +final class MongoClusterImpl implements MongoCluster { + @Nullable + private final AutoEncryptionSettings autoEncryptionSettings; + private final Cluster cluster; + private final CodecRegistry codecRegistry; + @Nullable + private final SynchronousContextProvider contextProvider; + @Nullable + private final Crypt crypt; + private final Object originator; + private final OperationExecutor operationExecutor; + private final ReadConcern readConcern; + private final ReadPreference readPreference; + private final boolean retryReads; 
+ private final boolean retryWrites; + @Nullable + private final ServerApi serverApi; + private final ServerSessionPool serverSessionPool; + private final TimeoutSettings timeoutSettings; + private final UuidRepresentation uuidRepresentation; + private final WriteConcern writeConcern; + + MongoClusterImpl( + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final Cluster cluster, final CodecRegistry codecRegistry, + @Nullable final SynchronousContextProvider contextProvider, @Nullable final Crypt crypt, final Object originator, + @Nullable final OperationExecutor operationExecutor, final ReadConcern readConcern, final ReadPreference readPreference, + final boolean retryReads, final boolean retryWrites, @Nullable final ServerApi serverApi, + final ServerSessionPool serverSessionPool, final TimeoutSettings timeoutSettings, final UuidRepresentation uuidRepresentation, + final WriteConcern writeConcern) { + this.autoEncryptionSettings = autoEncryptionSettings; + this.cluster = cluster; + this.codecRegistry = codecRegistry; + this.contextProvider = contextProvider; + this.crypt = crypt; + this.originator = originator; + this.operationExecutor = operationExecutor != null ? operationExecutor : new OperationExecutorImpl(timeoutSettings); + this.readConcern = readConcern; + this.readPreference = readPreference; + this.retryReads = retryReads; + this.retryWrites = retryWrites; + this.serverApi = serverApi; + this.serverSessionPool = serverSessionPool; + this.timeoutSettings = timeoutSettings; + this.uuidRepresentation = uuidRepresentation; + this.writeConcern = writeConcern; + } + + @Override + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public ReadConcern getReadConcern() { + return readConcern; + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = timeoutSettings.getTimeoutMS(); + return timeoutMS == null ? 
null : timeUnit.convert(timeoutMS, TimeUnit.MILLISECONDS); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, + timeoutSettings.withTimeout(timeout, timeUnit), uuidRepresentation, writeConcern); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return new MongoDatabaseImpl(databaseName, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings, operationExecutor); + } + + public Cluster getCluster() { + return cluster; + } + + @Nullable + public Crypt getCrypt() { + return crypt; + } + + public OperationExecutor getOperationExecutor() { + return operationExecutor; + } + + public ServerSessionPool getServerSessionPool() { + return serverSessionPool; + } + + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + + @Override + public ClientSession startSession() { + return startSession(ClientSessionOptions + .builder() + .defaultTransactionOptions(TransactionOptions.builder() + .readConcern(readConcern) + .writeConcern(writeConcern) + .build()) + .build()); + } + + @Override + public ClientSession startSession(final ClientSessionOptions options) { + notNull("options", options); + + ClientSessionOptions mergedOptions = ClientSessionOptions.builder(options) + .defaultTransactionOptions( + TransactionOptions.merge( + options.getDefaultTransactionOptions(), + TransactionOptions.builder() + .readConcern(readConcern) + .writeConcern(writeConcern) + .readPreference(readPreference) + .build())) + .build(); + return new ClientSessionImpl(serverSessionPool, originator, mergedOptions, operationExecutor); + } + + @Override + public MongoIterable listDatabaseNames() { + return createListDatabaseNamesIterable(null); + } + + @Override + public MongoIterable 
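startSession above merges the cluster's read concern, write concern and read preference into the session's default transaction options. A caller-side sketch of what that merge produces (assumes a running deployment at localhost):

import com.mongodb.ClientSessionOptions;
import com.mongodb.TransactionOptions;
import com.mongodb.client.ClientSession;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;

public final class SessionDefaultsSketch {
    public static void main(final String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017");
             ClientSession session = client.startSession(
                     ClientSessionOptions.builder().causallyConsistent(true).build())) {
            // The merged defaults carry the client's read concern, write concern and read preference.
            TransactionOptions defaults = session.getOptions().getDefaultTransactionOptions();
            System.out.println(defaults.getReadConcern() + " " + defaults.getWriteConcern() + " " + defaults.getReadPreference());
        }
    }
}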
listDatabaseNames(final ClientSession clientSession) { + notNull("clientSession", clientSession); + return createListDatabaseNamesIterable(clientSession); + } + + @Override + public ListDatabasesIterable listDatabases() { + return listDatabases(Document.class); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession) { + return listDatabases(clientSession, Document.class); + } + + @Override + public ListDatabasesIterable listDatabases(final Class clazz) { + return createListDatabasesIterable(null, clazz); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class clazz) { + notNull("clientSession", clientSession); + return createListDatabasesIterable(clientSession, clazz); + } + + @Override + public ChangeStreamIterable watch() { + return watch(Collections.emptyList()); + } + + @Override + public ChangeStreamIterable watch(final Class clazz) { + return watch(Collections.emptyList(), clazz); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return watch(pipeline, Document.class); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class clazz) { + return createChangeStreamIterable(null, pipeline, clazz); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return watch(clientSession, Collections.emptyList()); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class clazz) { + return watch(clientSession, Collections.emptyList(), clazz); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, Document.class); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class clazz) { + notNull("clientSession", clientSession); + return createChangeStreamIterable(clientSession, pipeline, clazz); + } + + private ListDatabasesIterable createListDatabasesIterable(@Nullable final ClientSession clientSession, final Class clazz) { + return new ListDatabasesIterableImpl<>(clientSession, clazz, codecRegistry, ReadPreference.primary(), operationExecutor, retryReads, timeoutSettings); + } + + private MongoIterable createListDatabaseNamesIterable(@Nullable final ClientSession clientSession) { + return createListDatabasesIterable(clientSession, BsonDocument.class) + .nameOnly(true) + .map(result -> result.getString("name").getValue()); + } + + private ChangeStreamIterable createChangeStreamIterable(@Nullable final ClientSession clientSession, + final List pipeline, final Class resultClass) { + return new ChangeStreamIterableImpl<>(clientSession, "admin", codecRegistry, readPreference, + readConcern, operationExecutor, pipeline, resultClass, ChangeStreamLevel.CLIENT, + retryReads, timeoutSettings); + } + + final class OperationExecutorImpl implements OperationExecutor { + private final TimeoutSettings executorTimeoutSettings; + + OperationExecutorImpl(final TimeoutSettings executorTimeoutSettings) { + this.executorTimeoutSettings = executorTimeoutSettings; + } + + @Override + public T execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern) { + return execute(operation, readPreference, readConcern, null); + } + + @Override + public T execute(final WriteOperation operation, final ReadConcern readConcern) { + return execute(operation, readConcern, null); + } + + @Override + public 
T execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern, + @Nullable final ClientSession session) { + if (session != null) { + session.notifyOperationInitiated(operation); + } + + ClientSession actualClientSession = getClientSession(session); + ReadBinding binding = getReadBinding(readPreference, readConcern, actualClientSession, session == null); + + try { + if (actualClientSession.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) { + throw new MongoClientException("Read preference in a transaction must be primary"); + } + return operation.execute(binding); + } catch (MongoException e) { + labelException(actualClientSession, e); + clearTransactionContextOnTransientTransactionError(session, e); + throw e; + } finally { + binding.release(); + } + } + + @Override + public T execute(final WriteOperation operation, final ReadConcern readConcern, + @Nullable final ClientSession session) { + if (session != null) { + session.notifyOperationInitiated(operation); + } + + ClientSession actualClientSession = getClientSession(session); + WriteBinding binding = getWriteBinding(readConcern, actualClientSession, session == null); + + try { + return operation.execute(binding); + } catch (MongoException e) { + labelException(actualClientSession, e); + clearTransactionContextOnTransientTransactionError(session, e); + throw e; + } finally { + binding.release(); + } + } + + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) { + if (Objects.equals(executorTimeoutSettings, newTimeoutSettings)) { + return this; + } + return new OperationExecutorImpl(newTimeoutSettings); + } + + @Override + public TimeoutSettings getTimeoutSettings() { + return executorTimeoutSettings; + } + + WriteBinding getWriteBinding(final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) { + return getReadWriteBinding(primary(), readConcern, session, ownsSession); + } + + ReadBinding getReadBinding(final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session, + final boolean ownsSession) { + return getReadWriteBinding(readPreference, readConcern, session, ownsSession); + } + + ReadWriteBinding getReadWriteBinding(final ReadPreference readPreference, + final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) { + + ClusterAwareReadWriteBinding readWriteBinding = new ClusterBinding(cluster, + getReadPreferenceForBinding(readPreference, session), readConcern, getOperationContext(session, readConcern)); + + if (crypt != null) { + readWriteBinding = new CryptBinding(readWriteBinding, crypt); + } + + return new ClientSessionBinding(session, ownsSession, readWriteBinding); + } + + private OperationContext getOperationContext(final ClientSession session, final ReadConcern readConcern) { + return new OperationContext( + getRequestContext(), + new ReadConcernAwareNoOpSessionContext(readConcern), + createTimeoutContext(session, executorTimeoutSettings), + serverApi); + } + + private RequestContext getRequestContext() { + RequestContext context = null; + if (contextProvider != null) { + context = contextProvider.getContext(); + } + return context == null ? 
IgnorableRequestContext.INSTANCE : context; + } + + private void labelException(final ClientSession session, final MongoException e) { + if (session.hasActiveTransaction() && (e instanceof MongoSocketException || e instanceof MongoTimeoutException + || e instanceof MongoQueryException && e.getCode() == 91) + && !e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + e.addLabel(TRANSIENT_TRANSACTION_ERROR_LABEL); + } + } + + private void clearTransactionContextOnTransientTransactionError(@Nullable final ClientSession session, final MongoException e) { + if (session != null && e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) { + session.clearTransactionContext(); + } + } + + private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) { + if (session == null) { + return readPreference; + } + if (session.hasActiveTransaction()) { + ReadPreference readPreferenceForBinding = session.getTransactionOptions().getReadPreference(); + if (readPreferenceForBinding == null) { + throw new MongoInternalException("Invariant violated. Transaction options read preference can not be null"); + } + return readPreferenceForBinding; + } + return readPreference; + } + + ClientSession getClientSession(@Nullable final ClientSession clientSessionFromOperation) { + ClientSession session; + if (clientSessionFromOperation != null) { + isTrue("ClientSession from same MongoClient", clientSessionFromOperation.getOriginator() == originator); + session = clientSessionFromOperation; + } else { + session = startSession(ClientSessionOptions.builder(). + causallyConsistent(false) + .defaultTransactionOptions( + TransactionOptions.builder() + .readConcern(ReadConcern.DEFAULT) + .readPreference(ReadPreference.primary()) + .writeConcern(WriteConcern.ACKNOWLEDGED).build()) + .build()); + } + return session; + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java index 2dca4baf3eb..8466950d7e5 100755 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java @@ -59,11 +59,11 @@ import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.bulk.WriteRequest; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; import com.mongodb.internal.operation.IndexHelper; -import com.mongodb.internal.operation.RenameCollectionOperation; import com.mongodb.internal.operation.SyncOperations; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.lang.Nullable; @@ -77,6 +77,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.assertions.Assertions.notNullElements; @@ -85,6 +86,7 @@ import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE; import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; class MongoCollectionImpl implements MongoCollection { 
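The executor above attaches the TransientTransactionError label to socket, timeout and code-91 errors raised inside an active transaction. That label is what drives the retry loop in ClientSession.withTransaction, so a caller-side sketch is simply:

import com.mongodb.client.ClientSession;
import com.mongodb.client.MongoCollection;
import org.bson.Document;

public final class TransactionLabelSketch {
    // withTransaction retries the body whenever the executor labels the failure
    // as TransientTransactionError, as implemented in labelException above.
    static String insertWithRetry(final ClientSession session, final MongoCollection<Document> collection) {
        return session.withTransaction(() -> {
            collection.insertOne(session, new Document("x", 1));
            return "done";
        });
    }
}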
@@ -100,12 +102,15 @@ class MongoCollectionImpl implements MongoCollection { private final UuidRepresentation uuidRepresentation; @Nullable private final AutoEncryptionSettings autoEncryptionSettings; + + private final TimeoutSettings timeoutSettings; private final OperationExecutor executor; MongoCollectionImpl(final MongoNamespace namespace, final Class documentClass, final CodecRegistry codecRegistry, - final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites, - final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, - @Nullable final AutoEncryptionSettings autoEncryptionSettings, final OperationExecutor executor) { + final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites, + final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings, + final OperationExecutor executor) { this.namespace = notNull("namespace", namespace); this.documentClass = notNull("documentClass", documentClass); this.codecRegistry = notNull("codecRegistry", codecRegistry); @@ -117,8 +122,9 @@ class MongoCollectionImpl implements MongoCollection { this.executor = notNull("executor", executor); this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); this.autoEncryptionSettings = autoEncryptionSettings; + this.timeoutSettings = timeoutSettings; this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - retryWrites, retryReads); + retryWrites, retryReads, timeoutSettings); } @Override @@ -151,34 +157,46 @@ public ReadConcern getReadConcern() { return readConcern; } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = timeoutSettings.getTimeoutMS(); + return timeoutMS == null ? 
null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + @Override public MongoCollection withDocumentClass(final Class clazz) { return new MongoCollectionImpl<>(namespace, clazz, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoCollection withCodecRegistry(final CodecRegistry codecRegistry) { return new MongoCollectionImpl<>(namespace, documentClass, withUuidRepresentation(codecRegistry, uuidRepresentation), - readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoCollection withReadPreference(final ReadPreference readPreference) { return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoCollection withWriteConcern(final WriteConcern writeConcern) { return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoCollection withReadConcern(final ReadConcern readConcern) { return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, + readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings.withTimeout(timeout, timeUnit), executor); } @Override @@ -219,11 +237,13 @@ public long estimatedDocumentCount() { @Override public long estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return executor.execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null); } private long executeCount(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) { - return executor.execute(operations.countDocuments(filter, options), readPreference, readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.countDocuments(filter, options), readPreference, readConcern, clientSession); } @Override @@ -252,7 +272,7 @@ public DistinctIterable distinct(final ClientSession clientSe private DistinctIterable createDistinctIterable(@Nullable final ClientSession clientSession, final String fieldName, final Bson filter, final Class resultClass) { return new 
DistinctIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, - readPreference, readConcern, executor, fieldName, filter, retryReads); + readPreference, readConcern, executor, fieldName, filter, retryReads, timeoutSettings); } @Override @@ -303,7 +323,7 @@ public FindIterable find(final ClientSession clientSession, f private FindIterable createFindIterable(@Nullable final ClientSession clientSession, final Bson filter, final Class resultClass) { return new FindIterableImpl<>(clientSession, namespace, this.documentClass, resultClass, codecRegistry, - readPreference, readConcern, executor, filter, retryReads); + readPreference, readConcern, executor, filter, retryReads, timeoutSettings); } @Override @@ -332,7 +352,7 @@ private AggregateIterable createAggregateIterable(@Nullable f final List pipeline, final Class resultClass) { return new AggregateIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, - readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads); + readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads, timeoutSettings); } @Override @@ -381,7 +401,7 @@ private ChangeStreamIterable createChangeStreamIterable(@Null final List pipeline, final Class resultClass) { return new ChangeStreamIterableImpl<>(clientSession, namespace, codecRegistry, readPreference, readConcern, executor, - pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads); + pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads, timeoutSettings); } @SuppressWarnings("deprecation") @@ -417,7 +437,7 @@ private com.mongodb.client.MapReduceIterable createMapReduceI final String mapFunction, final String reduceFunction, final Class resultClass) { return new MapReduceIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, - readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction); + readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction, timeoutSettings); } @Override @@ -446,7 +466,8 @@ private BulkWriteResult executeBulkWrite(@Nullable final ClientSession clientSes final List> requests, final BulkWriteOptions options) { notNull("requests", requests); - return executor.execute(operations.bulkWrite(requests, options), readConcern, clientSession); + return getExecutor(timeoutSettings) + .execute(operations.bulkWrite(requests, options), readConcern, clientSession); } @Override @@ -501,8 +522,10 @@ public InsertManyResult insertMany(final ClientSession clientSession, final List } private InsertManyResult executeInsertMany(@Nullable final ClientSession clientSession, final List documents, - final InsertManyOptions options) { - return toInsertManyResult(executor.execute(operations.insertMany(documents, options), readConcern, clientSession)); + final InsertManyOptions options) { + return toInsertManyResult( + getExecutor(timeoutSettings).execute(operations.insertMany(documents, options), readConcern, clientSession) + ); } @Override @@ -693,7 +716,8 @@ public TDocument findOneAndDelete(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { - return executor.execute(operations.findOneAndDelete(filter, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + 
.execute(operations.findOneAndDelete(filter, options), readConcern, clientSession); } @Override @@ -725,7 +749,8 @@ public TDocument findOneAndReplace(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndReplace(@Nullable final ClientSession clientSession, final Bson filter, final TDocument replacement, final FindOneAndReplaceOptions options) { - return executor.execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession); } @Override @@ -757,7 +782,8 @@ public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return executor.execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); } @Override @@ -789,7 +815,8 @@ public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return executor.execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); } @Override @@ -840,14 +867,14 @@ public void updateSearchIndex(final String indexName, final Bson definition) { notNull("indexName", indexName); notNull("definition", definition); - executor.execute(operations.updateSearchIndex(indexName, definition), readConcern, null); + getExecutor(timeoutSettings).execute(operations.updateSearchIndex(indexName, definition), readConcern, null); } @Override public void dropSearchIndex(final String indexName) { notNull("indexName", indexName); - executor.execute(operations.dropSearchIndex(indexName), readConcern, null); + getExecutor(timeoutSettings).execute(operations.dropSearchIndex(indexName), readConcern, null); } @Override @@ -862,7 +889,8 @@ public ListSearchIndexesIterable listSearchIndexes(final Clas } private void executeDrop(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { - executor.execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession); + getExecutor(timeoutSettings) + .execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession); } @Override @@ -909,12 +937,13 @@ public List createIndexes(final ClientSession clientSession, final List< private List executeCreateIndexes(@Nullable final ClientSession clientSession, final List indexes, final CreateIndexOptions createIndexOptions) { - executor.execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession); + getExecutor(operations.createTimeoutSettings(createIndexOptions)) + .execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession); return IndexHelper.getIndexNames(indexes, codecRegistry); } private List executeCreateSearchIndexes(final List 
searchIndexModels) { - executor.execute(operations.createSearchIndexes(searchIndexModels), readConcern, null); + getExecutor(timeoutSettings).execute(operations.createSearchIndexes(searchIndexModels), readConcern, null); return IndexHelper.getSearchIndexNames(searchIndexModels); } @@ -942,12 +971,12 @@ public ListIndexesIterable listIndexes(final ClientSession cl private ListIndexesIterable createListIndexesIterable(@Nullable final ClientSession clientSession, final Class resultClass) { return new ListIndexesIterableImpl<>(clientSession, getNamespace(), resultClass, codecRegistry, ReadPreference.primary(), - executor, retryReads); + executor, retryReads, timeoutSettings); } private ListSearchIndexesIterable createListSearchIndexesIterable(final Class resultClass) { - return new ListSearchIndexesIterableImpl<>(getNamespace(), executor, - resultClass, codecRegistry, readPreference, retryReads); + return new ListSearchIndexesIterableImpl<>(getNamespace(), executor, resultClass, codecRegistry, readPreference, + retryReads, timeoutSettings); } @Override @@ -1014,13 +1043,16 @@ public void dropIndexes(final ClientSession clientSession, final DropIndexOption } private void executeDropIndex(@Nullable final ClientSession clientSession, final String indexName, - final DropIndexOptions dropIndexOptions) { - notNull("dropIndexOptions", dropIndexOptions); - executor.execute(operations.dropIndex(indexName, dropIndexOptions), readConcern, clientSession); + final DropIndexOptions options) { + notNull("options", options); + getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.dropIndex(indexName, options), readConcern, clientSession); } - private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions dropIndexOptions) { - executor.execute(operations.dropIndex(keys, dropIndexOptions), readConcern, clientSession); + private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { + notNull("options", options); + getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.dropIndex(keys, options), readConcern, clientSession); } @Override @@ -1047,9 +1079,8 @@ public void renameCollection(final ClientSession clientSession, final MongoNames private void executeRenameCollection(@Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { - executor.execute(new RenameCollectionOperation(getNamespace(), newCollectionNamespace, writeConcern) - .dropTarget(renameCollectionOptions.isDropTarget()), - readConcern, clientSession); + getExecutor(timeoutSettings) + .execute(operations.renameCollection(newCollectionNamespace, renameCollectionOptions), readConcern, clientSession); } private DeleteResult executeDelete(@Nullable final ClientSession clientSession, final Bson filter, final DeleteOptions deleteOptions, @@ -1081,7 +1112,8 @@ private BulkWriteResult executeSingleWriteRequest(@Nullable final ClientSession final WriteOperation writeOperation, final WriteRequest.Type type) { try { - return executor.execute(writeOperation, readConcern, clientSession); + return getExecutor(timeoutSettings) + .execute(writeOperation, readConcern, clientSession); } catch (MongoBulkWriteException e) { if (e.getWriteErrors().isEmpty()) { throw new MongoWriteConcernException(e.getWriteConcernError(), @@ -1138,4 +1170,8 @@ private UpdateResult toUpdateResult(final com.mongodb.bulk.BulkWriteResult resul } } 
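// Editor's sketch (not part of the patch): executeSingleWriteRequest above rethrows a
// MongoBulkWriteException from a single write as either a MongoWriteConcernException (when only a
// write concern error is present) or a MongoWriteException. Assuming a MongoCollection<Document>
// named collection and the usual com.mongodb imports, that translation surfaces to callers like this:
try {
    collection.insertOne(new Document("_id", 1));
} catch (MongoWriteException e) {
    // a per-document write error, e.g. a duplicate key error (code 11000)
} catch (MongoWriteConcernException e) {
    // the write may have been applied, but the requested write concern was not satisfied
}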
+ private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java index 283f118af6b..b2b3284980d 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java @@ -31,6 +31,7 @@ import com.mongodb.client.MongoDatabase; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; import com.mongodb.internal.operation.SyncOperations; @@ -43,10 +44,12 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.MongoNamespace.COMMAND_COLLECTION_NAME; import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; /** @@ -60,16 +63,19 @@ public class MongoDatabaseImpl implements MongoDatabase { private final boolean retryWrites; private final boolean retryReads; private final ReadConcern readConcern; + private final UuidRepresentation uuidRepresentation; @Nullable private final AutoEncryptionSettings autoEncryptionSettings; + + private final TimeoutSettings timeoutSettings; private final OperationExecutor executor; - private final UuidRepresentation uuidRepresentation; private final SyncOperations operations; public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, - final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, - @Nullable final AutoEncryptionSettings autoEncryptionSettings, final OperationExecutor executor) { + final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, + final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings, + final OperationExecutor executor) { checkDatabaseNameValidity(name); this.name = notNull("name", name); this.codecRegistry = notNull("codecRegistry", codecRegistry); @@ -80,9 +86,10 @@ public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, f this.readConcern = notNull("readConcern", readConcern); this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); this.autoEncryptionSettings = autoEncryptionSettings; + this.timeoutSettings = timeoutSettings; this.executor = notNull("executor", executor); this.operations = new SyncOperations<>(new MongoNamespace(name, COMMAND_COLLECTION_NAME), BsonDocument.class, readPreference, - codecRegistry, readConcern, writeConcern, retryWrites, retryReads); + codecRegistry, readConcern, writeConcern, retryWrites, retryReads, timeoutSettings); } @Override @@ -110,28 +117,40 @@ public ReadConcern getReadConcern() { return readConcern; } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = timeoutSettings.getTimeoutMS(); + return 
timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + @Override public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { return new MongoDatabaseImpl(name, withUuidRepresentation(codecRegistry, uuidRepresentation), readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoDatabase withReadPreference(final ReadPreference readPreference) { return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoDatabase withWriteConcern(final WriteConcern writeConcern) { return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override public MongoDatabase withReadConcern(final ReadConcern readConcern) { return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings.withTimeout(timeout, timeUnit), executor); } @Override @@ -142,7 +161,7 @@ public MongoCollection getCollection(final String collectionName) { @Override public MongoCollection getCollection(final String collectionName, final Class documentClass) { return new MongoCollectionImpl<>(new MongoNamespace(name, collectionName), documentClass, codecRegistry, readPreference, - writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); } @Override @@ -193,7 +212,7 @@ private TResult executeCommand(@Nullable final ClientSession clientSes if (clientSession != null && clientSession.hasActiveTransaction() && !readPreference.equals(ReadPreference.primary())) { throw new MongoClientException("Read preference in a transaction must be primary"); } - return executor.execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession); + return getExecutor().execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession); } @Override @@ -208,7 +227,7 @@ public void drop(final ClientSession clientSession) { } private void executeDrop(@Nullable final ClientSession clientSession) { - executor.execute(operations.dropDatabase(), readConcern, clientSession); + getExecutor().execute(operations.dropDatabase(), readConcern, clientSession); } @Override @@ -251,7 +270,7 @@ private ListCollectionsIterableImpl createListCollectionsIter final Class resultClass, final boolean collectionNamesOnly) { return new ListCollectionsIterableImpl<>(clientSession, name, collectionNamesOnly, resultClass, codecRegistry, - ReadPreference.primary(), 
executor, retryReads); + ReadPreference.primary(), executor, retryReads, timeoutSettings); } @Override @@ -278,8 +297,8 @@ public void createCollection(final ClientSession clientSession, final String col private void executeCreateCollection(@Nullable final ClientSession clientSession, final String collectionName, final CreateCollectionOptions createCollectionOptions) { - executor.execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings), readConcern, - clientSession); + getExecutor().execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings), + readConcern, clientSession); } @Override @@ -374,19 +393,23 @@ private AggregateIterable createAggregateIterable(@Nullable f final List pipeline, final Class resultClass) { return new AggregateIterableImpl<>(clientSession, name, Document.class, resultClass, codecRegistry, - readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads); + readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads, timeoutSettings); } private ChangeStreamIterable createChangeStreamIterable(@Nullable final ClientSession clientSession, final List pipeline, final Class resultClass) { return new ChangeStreamIterableImpl<>(clientSession, name, codecRegistry, readPreference, readConcern, executor, - pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads); + pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads, timeoutSettings); } private void executeCreateView(@Nullable final ClientSession clientSession, final String viewName, final String viewOn, final List pipeline, final CreateViewOptions createViewOptions) { notNull("createViewOptions", createViewOptions); - executor.execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession); + getExecutor().execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession); + } + + private OperationExecutor getExecutor() { + return executor.withTimeoutSettings(timeoutSettings); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java index 86c2e7b99eb..d4b948c07a1 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java @@ -22,13 +22,17 @@ import com.mongodb.client.ClientSession; import com.mongodb.client.MongoCursor; import com.mongodb.client.MongoIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.lang.Nullable; import java.util.Collection; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; /** @@ -40,15 +44,18 @@ public abstract class MongoIterableImpl implements MongoIterable> asReadOperation(); @@ -58,8 +65,10 @@ ClientSession getClientSession() { return clientSession; } - OperationExecutor getExecutor() { - return executor; + protected abstract OperationExecutor getExecutor(); + + OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); } ReadPreference getReadPreference() { 
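// Editor's sketch (not part of the patch): the TimeoutSettings threaded through MongoDatabaseImpl,
// MongoCollectionImpl and MongoIterableImpl above back the public withTimeout/getTimeout API added in
// this change. Assuming an existing MongoClient named client and the usual driver imports, usage is
// expected to look like:
MongoDatabase db = client.getDatabase("test").withTimeout(500, TimeUnit.MILLISECONDS);
MongoCollection<Document> coll = db.getCollection("coll").withTimeout(200, TimeUnit.MILLISECONDS);
Long timeoutMs = coll.getTimeout(TimeUnit.MILLISECONDS); // 200: the collection-level value wins
Document first = coll.find().first();                    // the whole operation runs under the 200 ms budget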
@@ -74,6 +83,10 @@ protected boolean getRetryReads() { return retryReads; } + protected TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + @Nullable public Integer getBatchSize() { return batchSize; @@ -85,6 +98,19 @@ public MongoIterable batchSize(final int batchSize) { return this; } + @Nullable + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public MongoIterable timeoutMode(final TimeoutMode timeoutMode) { + if (timeoutSettings.getTimeoutMS() == null) { + throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set."); + } + this.timeoutMode = timeoutMode; + return this; + } + @Override public MongoCursor iterator() { return new MongoBatchCursorAdapter<>(execute()); @@ -127,6 +153,18 @@ public > A into(final A target) { } private BatchCursor execute() { - return executor.execute(asReadOperation(), readPreference, readConcern, clientSession); + return getExecutor().execute(asReadOperation(), readPreference, readConcern, clientSession); + } + + + protected long validateMaxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + Long timeoutMS = timeoutSettings.getTimeoutMS(); + long maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + + isTrueArgument("maxAwaitTimeMS must be less than timeoutMS", timeoutMS == null || timeoutMS == 0 + || timeoutMS > maxAwaitTimeMS); + + return maxAwaitTimeMS; } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java index 3786dc1ad6f..37df6dffe32 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java +++ b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java @@ -19,6 +19,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.lang.Nullable; @@ -33,10 +34,10 @@ public interface OperationExecutor { /** * Execute the read operation with the given read preference. * - * @param the operations result type. - * @param operation the read operation. + * @param the operations result type. + * @param operation the read operation. * @param readPreference the read preference. - * @param readConcern the read concern + * @param readConcern the read concern * @return the result of executing the operation. */ T execute(ReadOperation operation, ReadPreference readPreference, ReadConcern readConcern); @@ -44,9 +45,9 @@ public interface OperationExecutor { /** * Execute the write operation. * - * @param operation the write operation. + * @param the operations result type. + * @param operation the write operation. * @param readConcern the read concern - * @param the operations result type. * @return the result of executing the operation. */ T execute(WriteOperation operation, ReadConcern readConcern); @@ -54,11 +55,11 @@ public interface OperationExecutor { /** * Execute the read operation with the given read preference. * - * @param the operations result type. - * @param operation the read operation. + * @param the operations result type. + * @param operation the read operation. * @param readPreference the read preference. 
- * @param readConcern the read concern - * @param session the session to associate this operation with + * @param readConcern the read concern + * @param session the session to associate this operation with * @return the result of executing the operation. */ T execute(ReadOperation operation, ReadPreference readPreference, ReadConcern readConcern, @Nullable ClientSession session); @@ -66,11 +67,28 @@ public interface OperationExecutor { /** * Execute the write operation. * - * @param operation the write operation. + * @param the operations result type. + * @param operation the write operation. * @param readConcern the read concern - * @param session the session to associate this operation with - * @param the operations result type. + * @param session the session to associate this operation with * @return the result of executing the operation. */ T execute(WriteOperation operation, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Create a new OperationExecutor with a specific timeout settings + * + * @param timeoutSettings the TimeoutContext to use for the operations + * @return the new operation executor with the set timeout context + * @since 5.2 + */ + OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings); + + /** + * Returns the current timeout settings + * + * @return the timeout settings + * @since 5.2 + */ + TimeoutSettings getTimeoutSettings(); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java b/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java new file mode 100644 index 00000000000..6a5ef68e615 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
      + */ +public final class TimeoutHelper { + private static final String DEFAULT_TIMEOUT_MESSAGE = "Operation exceeded the timeout limit."; + + private TimeoutHelper() { + //NOP + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + final String message, + @Nullable final Timeout timeout) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> collection.withTimeout(0, MILLISECONDS), + ms -> collection.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return collection; + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + @Nullable final Timeout timeout) { + return collectionWithTimeout(collection, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> database.withTimeout(0, MILLISECONDS), + ms -> database.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return database; + } + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + @Nullable final Timeout timeout) { + return databaseWithTimeout(database, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + +} diff --git a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java index 864fdf004dc..6d529741a24 100644 --- a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java +++ b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java @@ -19,6 +19,7 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.MongoUpdatedEncryptedFieldsException; import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; import com.mongodb.client.FindIterable; import com.mongodb.client.MongoDatabase; import com.mongodb.client.model.CreateCollectionOptions; @@ -108,7 +109,7 @@ public interface ClientEncryption extends Closeable { * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption * @mongodb.driver.manual reference/operator/aggregation/match/ $match */ - @Beta(Beta.Reason.SERVER) + @Beta(Reason.SERVER) BsonDocument encryptExpression(Bson expression, EncryptOptions options); /** diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java index ef965f0ae95..2ac985f21a6 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java @@ -195,11 +195,11 @@ public void shouldPassAllOutcomes(final int maxPoolSize, } private void assertEventEquality(final TestCommandListener commandListener, final List expectedStartEvents) { - List actualStartedEvents = commandListener.getCommandStartedEvents(); + List actualStartedEvents = commandListener.getCommandStartedEvents(); assertEquals(expectedStartEvents.size(), actualStartedEvents.size()); for (int i = 0; i < expectedStartEvents.size(); i++) { ExpectedEvent expectedEvent = expectedStartEvents.get(i); - CommandStartedEvent actualEvent = (CommandStartedEvent) actualStartedEvents.get(i); + CommandStartedEvent actualEvent = actualStartedEvents.get(i); 
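// Editor's sketch (not part of the patch): TimeoutHelper, introduced earlier in this patch, lets
// internal callers cap a collection or database with whatever remains of the current operation's
// deadline. Assuming a MongoCollection<BsonDocument> keyVaultCollection and a possibly-null remaining
// Timeout taken from the operation's timeout context (construction omitted), usage looks like:
MongoCollection<BsonDocument> boundedKeyVault = TimeoutHelper.collectionWithTimeout(
        keyVaultCollection, "Key vault operation exceeded the timeout limit.", remainingTimeout);
// A null timeout returns the collection unchanged; an infinite timeout maps to withTimeout(0, MILLISECONDS);
// an already-expired timeout raises a timeout error via TimeoutContext.throwMongoTimeoutException(message).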
assertEquals(expectedEvent.getDatabase(), actualEvent.getDatabaseName(), "Database name"); assertEquals(expectedEvent.getCommandName(), actualEvent.getCommandName(), "Command name"); } diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java index 64f9568e4ed..25abafc65ee 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java @@ -20,12 +20,14 @@ import com.mongodb.MongoClientSettings; import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoWriteConcernException; import com.mongodb.WriteConcern; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.ValidationOptions; import com.mongodb.client.test.CollectionHelper; import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandStartedEvent; import com.mongodb.internal.connection.TestCommandListener; import com.mongodb.lang.Nullable; import org.bson.BsonArray; @@ -35,6 +37,7 @@ import org.bson.BsonUndefined; import org.bson.BsonValue; import org.bson.codecs.BsonDocumentCodec; +import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -50,6 +53,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.getEnv; import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; @@ -93,6 +97,11 @@ protected BsonDocument getDefinition() { return definition; } + + private boolean hasTimeoutError(@Nullable final BsonValue expectedResult) { + return hasErrorField(expectedResult, "isTimeoutError"); + } + private boolean hasErrorContainsField(@Nullable final BsonValue expectedResult) { return hasErrorField(expectedResult, "errorContains"); } @@ -127,7 +136,6 @@ public void setUp() { assumeTrue("Client side encryption tests disabled", hasEncryptionTestsEnabled()); assumeFalse("runOn requirements not satisfied", skipTest); assumeFalse("Skipping count tests", filename.startsWith("count.")); - assumeFalse("Skipping timeoutMS tests", filename.startsWith("timeoutMS.")); assumeFalse(definition.getString("skipReason", new BsonString("")).getValue(), definition.containsKey("skipReason")); @@ -262,6 +270,11 @@ public void setUp() { MongoClientSettings.Builder mongoClientSettingsBuilder = Fixture.getMongoClientSettingsBuilder() .addCommandListener(commandListener); + if (clientOptions.containsKey("timeoutMS")) { + long timeoutMs = clientOptions.getInt32("timeoutMS").longValue(); + mongoClientSettingsBuilder.timeout(timeoutMs, TimeUnit.MILLISECONDS); + } + if (!kmsProvidersMap.isEmpty()) { mongoClientSettingsBuilder.autoEncryptionSettings(AutoEncryptionSettings.builder() .keyVaultNamespace(keyVaultNamespace) @@ -276,6 +289,19 @@ public void setUp() { createMongoClient(mongoClientSettingsBuilder.build()); database = getDatabase(databaseName); helper = new JsonPoweredCrudTestHelper(description, database, database.getCollection(collectionName, BsonDocument.class)); + + if (definition.containsKey("failPoint")) { + collectionHelper.runAdminCommand(definition.getDocument("failPoint")); + } + } + + @After + public void cleanUp() { + if (collectionHelper != null && definition.containsKey("failPoint")) { + 
collectionHelper.runAdminCommand(new BsonDocument("configureFailPoint", + definition.getDocument("failPoint").getString("configureFailPoint")) + .append("mode", new BsonString("off"))); + } } protected abstract void createMongoClient(MongoClientSettings settings); @@ -285,12 +311,15 @@ public void setUp() { @Test public void shouldPassAllOutcomes() { + assumeTrue("Skipping timeoutMS tests", filename.startsWith("timeoutMS.")); for (BsonValue cur : definition.getArray("operations")) { BsonDocument operation = cur.asDocument(); String operationName = operation.getString("name").getValue(); BsonValue expectedResult = operation.get("result"); try { BsonDocument actualOutcome = helper.getOperationResults(operation); + assertFalse(String.format("Expected a timeout error but got: %s", actualOutcome.toJson()), hasTimeoutError(expectedResult)); + if (expectedResult != null) { BsonValue actualResult = actualOutcome.get("result", new BsonString("No result or error")); assertBsonValue("Expected operation result differs from actual", expectedResult, actualResult); @@ -302,6 +331,9 @@ public void shouldPassAllOutcomes() { getErrorCodeNameField(expectedResult), operationName), hasErrorCodeNameField(expectedResult)); } catch (Exception e) { boolean passedAssertion = false; + if (hasTimeoutError(expectedResult) && e instanceof MongoOperationTimeoutException){ + passedAssertion = true; + } if (hasErrorContainsField(expectedResult)) { String expectedError = getErrorContainsField(expectedResult); assertTrue(String.format("Expected '%s' but got '%s' for operation %s", expectedError, e.getMessage(), @@ -325,8 +357,8 @@ public void shouldPassAllOutcomes() { } if (definition.containsKey("expectations")) { - List expectedEvents = getExpectedEvents(definition.getArray("expectations"), "default", null); - List events = commandListener.getCommandStartedEvents(); + List expectedEvents = getExpectedEvents(definition.getArray("expectations"), specDocument.getString("database_name").getValue(), null); + List events = commandListener.getCommandStartedEvents(); assertEventsEquality(expectedEvents, events); } diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java new file mode 100644 index 00000000000..418f874aabe --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java @@ -0,0 +1,954 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ClusterFixture; +import com.mongodb.ConnectionString; +import com.mongodb.CursorType; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCredential; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSDownloadStream; +import com.mongodb.client.gridfs.GridFSUploadStream; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.internal.connection.ServerHelper; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import com.mongodb.test.FlakyTest; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.time.Instant; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.applyTimeoutMultiplierForServerless; +import static com.mongodb.ClusterFixture.getConnectionString; +import static com.mongodb.ClusterFixture.isAuthenticated; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isServerlessTest; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.ClusterFixture.sleep; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getPrimary; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; 
+import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * See + *
      Prose Tests. + */ +@SuppressWarnings("checkstyle:VisibilityModifier") +public abstract class AbstractClientSideOperationsTimeoutProseTest { + + protected static final String FAIL_COMMAND_NAME = "failCommand"; + protected static final String GRID_FS_BUCKET_NAME = "db.fs"; + private static final AtomicInteger COUNTER = new AtomicInteger(); + + protected MongoNamespace namespace; + protected MongoNamespace gridFsFileNamespace; + protected MongoNamespace gridFsChunksNamespace; + + protected CollectionHelper collectionHelper; + private CollectionHelper filesCollectionHelper; + private CollectionHelper chunksCollectionHelper; + + protected TestCommandListener commandListener; + + protected abstract MongoClient createMongoClient(MongoClientSettings mongoClientSettings); + + protected abstract GridFSBucket createGridFsBucket(MongoDatabase mongoDatabase, String bucketName); + + protected abstract boolean isAsync(); + + protected int postSessionCloseSleep() { + return 0; + } + + @SuppressWarnings("try") + @FlakyTest(maxAttempts = 3) + @DisplayName("4. Background Connection Pooling - timeoutMS used for handshake commands") + public void testBackgroundConnectionPoolingTimeoutMSUsedForHandshakeCommands() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isAuthenticated()); + assumeFalse(isServerlessTest()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: {" + + " times: 1" + + " }," + + " data: {" + + " failCommands: [\"saslContinue\"]," + + " blockConnection: true," + + " blockTimeMS: 150," + + " appName: \"timeoutBackgroundPoolTest\"" + + " }" + + "}"); + + TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener(); + + try (MongoClient ignoredClient = createMongoClient(getMongoClientSettingsBuilder() + .applicationName("timeoutBackgroundPoolTest") + .applyToConnectionPoolSettings(builder -> { + builder.minSize(1); + builder.addConnectionPoolListener(connectionPoolListener); + }) + .timeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) { + + assertDoesNotThrow(() -> + connectionPoolListener.waitForEvents(asList(ConnectionCreatedEvent.class, ConnectionClosedEvent.class), + 10, TimeUnit.SECONDS)); + } + } + + @SuppressWarnings("try") + @FlakyTest(maxAttempts = 3) + @DisplayName("4. 
Background Connection Pooling - timeoutMS is refreshed for each handshake command") + public void testBackgroundConnectionPoolingTimeoutMSIsRefreshedForEachHandshakeCommand() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isAuthenticated()); + assumeFalse(isServerlessTest()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: \"alwaysOn\"," + + " data: {" + + " failCommands: [\"hello\", \"isMaster\", \"saslContinue\"]," + + " blockConnection: true," + + " blockTimeMS: 150," + + " appName: \"refreshTimeoutBackgroundPoolTest\"" + + " }" + + "}"); + + TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener(); + + try (MongoClient ignoredClient = createMongoClient(getMongoClientSettingsBuilder() + .applicationName("refreshTimeoutBackgroundPoolTest") + .applyToConnectionPoolSettings(builder -> { + builder.minSize(1); + builder.addConnectionPoolListener(connectionPoolListener); + }) + .timeout(applyTimeoutMultiplierForServerless(250), TimeUnit.MILLISECONDS))) { + + assertDoesNotThrow(() -> + connectionPoolListener.waitForEvents(asList(ConnectionCreatedEvent.class, ConnectionReadyEvent.class), + 10, TimeUnit.SECONDS)); + } + } + + @FlakyTest(maxAttempts = 3) + @DisplayName("5. Blocking Iteration Methods - Tailable cursors") + public void testBlockingIterationMethodsTailableCursor() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isServerlessTest()); + + collectionHelper.create(namespace.getCollectionName(), + new CreateCollectionOptions().capped(true).sizeInBytes(10 * 1024 * 1024)); + collectionHelper.insertDocuments(singletonList(BsonDocument.parse("{x: 1}")), WriteConcern.MAJORITY); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: \"alwaysOn\"," + + " data: {" + + " failCommands: [\"getMore\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(150) + + " }" + + "}"); + + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .timeout(applyTimeoutMultiplierForServerless(250), TimeUnit.MILLISECONDS))) { + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (MongoCursor cursor = collection.find().cursorType(CursorType.Tailable).cursor()) { + Document document = assertDoesNotThrow(cursor::next); + assertEquals(1, document.get("x")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + } + + List events = commandListener.getCommandSucceededEvents(); + assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count()); + long getMoreCount = events.stream().filter(e -> e.getCommandName().equals("getMore")).count(); + assertTrue(getMoreCount <= 2, "getMoreCount expected to less than or equal to two but was: " + getMoreCount); + } + } + + @FlakyTest(maxAttempts = 3) + @DisplayName("5. 
Blocking Iteration Methods - Change Streams") + public void testBlockingIterationMethodsChangeStream() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + assumeFalse(isServerlessTest()); + assumeFalse(isAsync()); // Async change stream cursor is non-deterministic for cursor::next + + BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + sleep(applyTimeoutMultiplierForServerless(2000)); + collectionHelper.insertDocuments(singletonList(BsonDocument.parse("{x: 1}")), WriteConcern.MAJORITY); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: \"alwaysOn\"," + + " data: {" + + " failCommands: [\"getMore\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(150) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .timeout(applyTimeoutMultiplierForServerless(250), TimeUnit.MILLISECONDS))) { + + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + try (MongoChangeStreamCursor> cursor = collection.watch( + singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}"))) + .startAtOperationTime(startTime) + .fullDocument(FullDocument.UPDATE_LOOKUP) + .cursor()) { + ChangeStreamDocument document = assertDoesNotThrow(cursor::next); + + Document fullDocument = document.getFullDocument(); + assertNotNull(fullDocument); + assertEquals(1, fullDocument.get("x")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + } + List events = commandListener.getCommandSucceededEvents(); + assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("aggregate")).count()); + long getMoreCount = events.stream().filter(e -> e.getCommandName().equals("getMore")).count(); + assertTrue(getMoreCount <= 2, "getMoreCount expected to less than or equal to two but was: " + getMoreCount); + } + } + + @DisplayName("6. GridFS Upload - uploads via openUploadStream can be timed out") + @Test + public void testGridFSUploadViaOpenUploadStreamTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(205)) + + " }" + + "}"); + + chunksCollectionHelper.create(); + filesCollectionHelper.create(); + + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .timeout(rtt + applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(namespace.getDatabaseName()); + GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME); + + try (GridFSUploadStream uploadStream = gridFsBucket.openUploadStream("filename")){ + uploadStream.write(0x12); + assertThrows(MongoOperationTimeoutException.class, uploadStream::close); + } + } + } + + @DisplayName("6. 
GridFS Upload - Aborting an upload stream can be timed out") + @Test + public void testAbortingGridFsUploadStreamTimeout() throws Throwable { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"delete\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(305)) + + " }" + + "}"); + + chunksCollectionHelper.create(); + filesCollectionHelper.create(); + + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .timeout(rtt + applyTimeoutMultiplierForServerless(300), TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(namespace.getDatabaseName()); + GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME).withChunkSizeBytes(2); + + try (GridFSUploadStream uploadStream = gridFsBucket.openUploadStream("filename")){ + uploadStream.write(new byte[]{0x01, 0x02, 0x03, 0x04}); + assertThrows(MongoOperationTimeoutException.class, uploadStream::abort); + } + } + } + + @DisplayName("6. GridFS Download") + @Test + public void testGridFsDownloadStreamTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + chunksCollectionHelper.create(); + filesCollectionHelper.create(); + + filesCollectionHelper.insertDocuments(singletonList(BsonDocument.parse( + "{" + + " _id: {" + + " $oid: \"000000000000000000000005\"" + + " }," + + " length: 10," + + " chunkSize: 4," + + " uploadDate: {" + + " $date: \"1970-01-01T00:00:00.000Z\"" + + " }," + + " md5: \"57d83cd477bfb1ccd975ab33d827a92b\"," + + " filename: \"length-10\"," + + " contentType: \"application/octet-stream\"," + + " aliases: []," + + " metadata: {}" + + "}" + )), WriteConcern.MAJORITY); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { skip: 1 }," + + " data: {" + + " failCommands: [\"find\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(95)) + + " }" + + "}"); + + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .timeout(rtt + applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(namespace.getDatabaseName()); + GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME).withChunkSizeBytes(2); + + try (GridFSDownloadStream downloadStream = gridFsBucket.openDownloadStream(new ObjectId("000000000000000000000005"))){ + assertThrows(MongoOperationTimeoutException.class, downloadStream::read); + + List events = commandListener.getCommandStartedEvents(); + List findCommands = events.stream().filter(e -> e.getCommandName().equals("find")).collect(Collectors.toList()); + + assertEquals(2, findCommands.size()); + assertEquals(gridFsFileNamespace.getCollectionName(), findCommands.get(0).getCommand().getString("find").getValue()); + assertEquals(gridFsChunksNamespace.getCollectionName(), findCommands.get(1).getCommand().getString("find").getValue()); + } + } + } + + @DisplayName("8. Server Selection 1 / 2") + @ParameterizedTest(name = "[{index}] {0}") + @MethodSource("test8ServerSelectionArguments") + public void test8ServerSelection(final String connectionString) { + assumeFalse(isServerlessTest()); + int timeoutBuffer = 100; // 5 in spec, Java is slower + // 1. 
Create a MongoClient + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .applyConnectionString(new ConnectionString(connectionString))) + ) { + long start = System.nanoTime(); + // 2. Using client, execute: + Throwable throwable = assertThrows(MongoTimeoutException.class, () -> { + mongoClient.getDatabase("admin").runCommand(new BsonDocument("ping", new BsonInt32(1))); + }); + // Expect this to fail with a server selection timeout error after no more than 15ms [this is increased] + long elapsed = msElapsedSince(start); + assertTrue(throwable.getMessage().contains("while waiting for a server")); + assertTrue(elapsed < 10 + timeoutBuffer, "Took too long to time out, elapsedMS: " + elapsed); + } + } + + @DisplayName("8. Server Selection 2 / 2") + @ParameterizedTest(name = "[{index}] {0}") + @MethodSource("test8ServerSelectionHandshakeArguments") + public void test8ServerSelectionHandshake(final String ignoredTestName, final int timeoutMS, final int serverSelectionTimeoutMS) { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isAuthenticated()); + assumeFalse(isServerlessTest()); + + MongoCredential credential = getConnectionString().getCredential(); + assertNotNull(credential); + assertNull(credential.getAuthenticationMechanism()); + + MongoNamespace namespace = generateNamespace(); + collectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), namespace); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: \"alwaysOn\"," + + " data: {" + + " failCommands: [\"saslContinue\"]," + + " blockConnection: true," + + " blockTimeMS: 350" + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .timeout(timeoutMS, TimeUnit.MILLISECONDS) + .applyToClusterSettings(b -> b.serverSelectionTimeout(serverSelectionTimeoutMS, TimeUnit.MILLISECONDS)) + .retryWrites(false))) { + + long start = System.nanoTime(); + assertThrows(MongoOperationTimeoutException.class, () -> { + mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()) + .insertOne(new Document("x", 1)); + }); + long elapsed = msElapsedSince(start); + assertTrue(elapsed <= 310, "Took too long to time out, elapsedMS: " + elapsed); + } + } + + @SuppressWarnings("try") + @DisplayName("9. End Session. 
The timeout specified via the MongoClient timeoutMS option") + @Test + public void test9EndSessionClientTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + assumeFalse(isServerlessTest()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"abortTransaction\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(150) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder().retryWrites(false) + .timeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession()) { + session.startTransaction(); + collection.insertOne(session, new Document("x", 1)); + + long start = System.nanoTime(); + session.close(); + long elapsed = msElapsedSince(start) - postSessionCloseSleep(); + assertTrue(elapsed <= applyTimeoutMultiplierForServerless(150), "Took too long to time out, elapsedMS: " + elapsed); + } + } + CommandFailedEvent abortTransactionEvent = assertDoesNotThrow(() -> + commandListener.getCommandFailedEvent("abortTransaction")); + assertInstanceOf(MongoOperationTimeoutException.class, abortTransactionEvent.getThrowable()); + } + + @SuppressWarnings("try") + @DisplayName("9. End Session. The timeout specified via the ClientSession defaultTimeoutMS option") + @Test + public void test9EndSessionSessionTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + assumeFalse(isServerlessTest()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"abortTransaction\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(150) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless((100)), TimeUnit.MILLISECONDS).build())) { + session.startTransaction(); + collection.insertOne(session, new Document("x", 1)); + + long start = System.nanoTime(); + session.close(); + long elapsed = msElapsedSince(start) - postSessionCloseSleep(); + assertTrue(elapsed <= applyTimeoutMultiplierForServerless(150), "Took too long to time out, elapsedMS: " + elapsed); + } + } + CommandFailedEvent abortTransactionEvent = assertDoesNotThrow(() -> + commandListener.getCommandFailedEvent("abortTransaction")); + assertInstanceOf(MongoOperationTimeoutException.class, abortTransactionEvent.getThrowable()); + } + + @DisplayName("9. 
End Session - Custom Test: Each operation has its own timeout with commit") + @Test + public void test9EndSessionCustomTestEachOperationHasItsOwnTimeoutWithCommit() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + 25 + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) { + session.startTransaction(); + collection.insertOne(session, new Document("x", 1)); + sleep(applyTimeoutMultiplierForServerless(200)); + + assertDoesNotThrow(session::commitTransaction); + } + } + assertDoesNotThrow(() -> commandListener.getCommandSucceededEvent("commitTransaction")); + } + + @DisplayName("9. End Session - Custom Test: Each operation has its own timeout with abort") + @Test + public void test9EndSessionCustomTestEachOperationHasItsOwnTimeoutWithAbort() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + 25 + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) { + session.startTransaction(); + collection.insertOne(session, new Document("x", 1)); + sleep(applyTimeoutMultiplierForServerless(200)); + + assertDoesNotThrow(session::close); + } + } + assertDoesNotThrow(() -> commandListener.getCommandSucceededEvent("abortTransaction")); + } + + @DisplayName("10. 
Convenient Transactions") + @Test + public void test10ConvenientTransactions() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + assumeFalse(isAsync()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 2 }," + + " data: {" + + " failCommands: [\"insert\", \"abortTransaction\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(150) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .timeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession()) { + assertThrows(MongoOperationTimeoutException.class, + () -> session.withTransaction(() -> collection.insertOne(session, new Document("x", 1)))); + } + + List failedEvents = commandListener.getEvents().stream() + .filter(e -> e instanceof CommandFailedEvent) + .collect(Collectors.toList()); + + assertEquals(1, failedEvents.stream().filter(e -> e.getCommandName().equals("insert")).count()); + assertEquals(1, failedEvents.stream().filter(e -> e.getCommandName().equals("abortTransaction")).count()); + } + } + + @DisplayName("10. Convenient Transactions - Custom Test: with transaction uses a single timeout") + @Test + public void test10CustomTestWithTransactionUsesASingleTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + assumeFalse(isAsync()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(25) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) { + assertThrows(MongoOperationTimeoutException.class, + () -> session.withTransaction(() -> { + collection.insertOne(session, new Document("x", 1)); + sleep(applyTimeoutMultiplierForServerless(200)); + return true; + }) + ); + } + } + } + + @DisplayName("10. 
Convenient Transactions - Custom Test: with transaction uses a single timeout - lock") + @Test + public void test10CustomTestWithTransactionUsesASingleTimeoutWithLock() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + assumeFalse(isAsync()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: \"alwaysOn\"," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(25) + + " errorCode: " + 24 + + " errorLabels: [\"TransientTransactionError\"]" + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) { + assertThrows(MongoOperationTimeoutException.class, + () -> session.withTransaction(() -> { + collection.insertOne(session, new Document("x", 1)); + sleep(applyTimeoutMultiplierForServerless(200)); + return true; + }) + ); + } + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. + */ + @Test + @DisplayName("Should ignore wTimeoutMS of WriteConcern to initial and subsequent commitTransaction operations") + public void shouldIgnoreWtimeoutMsOfWriteConcernToInitialAndSubsequentCommitTransactionOperations() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS) + .build())) { + session.startTransaction(TransactionOptions.builder() + .writeConcern(WriteConcern.ACKNOWLEDGED.withWTimeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS)) + .build()); + collection.insertOne(session, new Document("x", 1)); + sleep(applyTimeoutMultiplierForServerless(200)); + + assertDoesNotThrow(session::commitTransaction); + //repeat commit. + assertDoesNotThrow(session::commitTransaction); + } + } + List commandStartedEvents = commandListener.getCommandStartedEvents("commitTransaction"); + assertEquals(2, commandStartedEvents.size()); + + commandStartedEvents.forEach(e -> { + BsonDocument command = e.getCommand(); + if (command.containsKey("writeConcern")) { + BsonDocument writeConcern = command.getDocument("writeConcern"); + assertFalse(writeConcern.isEmpty()); + assertFalse(writeConcern.containsKey("wtimeout")); + }}); + } + + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("KillCursors is not executed after getMore network error when timeout is not enabled") + @Test + public void testKillCursorsIsNotExecutedAfterGetMoreNetworkErrorWhenTimeoutIsNotEnabled() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isServerlessTest()); + + long rtt = ClusterFixture.getPrimaryRTT(); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + collectionHelper.insertDocuments(new Document(), new Document()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1}," + + " data: {" + + " failCommands: [\"getMore\" ]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(600)) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .retryReads(true) + .applyToSocketSettings(builder -> builder.readTimeout(applyTimeoutMultiplierForServerless(500), TimeUnit.MILLISECONDS)))) { + + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + MongoCursor cursor = collection.find() + .batchSize(1) + .cursor(); + + cursor.next(); + assertThrows(MongoSocketReadTimeoutException.class, cursor::next); + cursor.close(); + } + + List events = commandListener.getCommandStartedEvents(); + assertEquals(2, events.size(), "Actual events: " + events.stream() + .map(CommandStartedEvent::getCommandName) + .collect(Collectors.toList())); + assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count()); + assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("getMore")).count()); + + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. + */ + @DisplayName("KillCursors is not executed after getMore network error") + @Test + public void testKillCursorsIsNotExecutedAfterGetMoreNetworkError() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isServerlessTest()); + + long rtt = ClusterFixture.getPrimaryRTT(); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + collectionHelper.insertDocuments(new Document(), new Document()); + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1}," + + " data: {" + + " failCommands: [\"getMore\" ]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(600)) + + " }" + + "}"); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .timeout(applyTimeoutMultiplierForServerless(500), TimeUnit.MILLISECONDS))) { + + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + MongoCursor cursor = collection.find() + .batchSize(1) + .cursor(); + + cursor.next(); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + cursor.close(); + } + + List events = commandListener.getCommandStartedEvents(); + assertEquals(2, events.size(), "Actual events: " + events.stream() + .map(CommandStartedEvent::getCommandName) + .collect(Collectors.toList())); + assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count()); + assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("getMore")).count()); + + } + + /** + * Not a prose spec test. 
However, it is additional test case for better coverage. + */ + @Test + @DisplayName("Should throw timeout exception for subsequent commit transaction") + public void shouldThrowTimeoutExceptionForSubsequentCommitTransaction() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeFalse(isStandalone()); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = mongoClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()); + + try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder() + .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS) + .build())) { + session.startTransaction(TransactionOptions.builder().build()); + collection.insertOne(session, new Document("x", 1)); + sleep(applyTimeoutMultiplierForServerless(200)); + + assertDoesNotThrow(session::commitTransaction); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"commitTransaction\"]," + + " blockConnection: true," + + " blockTimeMS: " + applyTimeoutMultiplierForServerless(500) + + " }" + + "}"); + + //repeat commit. + assertThrows(MongoOperationTimeoutException.class, session::commitTransaction); + } + } + List commandStartedEvents = commandListener.getCommandStartedEvents("commitTransaction"); + assertEquals(2, commandStartedEvents.size()); + + List failedEvents = commandListener.getCommandFailedEvents("commitTransaction"); + assertEquals(1, failedEvents.size()); + } + + private static Stream test8ServerSelectionArguments() { + return Stream.of( + Arguments.of(Named.of("serverSelectionTimeoutMS honored if timeoutMS is not set", + "mongodb://invalid/?serverSelectionTimeoutMS=10")), + Arguments.of(Named.of("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", + "mongodb://invalid/?timeoutMS=200&serverSelectionTimeoutMS=10")), + Arguments.of(Named.of("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", + "mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=200")), + Arguments.of(Named.of("serverSelectionTimeoutMS honored for server selection if timeoutMS=0", + "mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10")) + + ); + } + + private static Stream test8ServerSelectionHandshakeArguments() { + return Stream.of( + Arguments.of("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", 200, 300), + Arguments.of("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", 300, 200) + ); + } + + protected MongoNamespace generateNamespace() { + return new MongoNamespace(getDefaultDatabaseName(), + getClass().getSimpleName() + "_" + COUNTER.incrementAndGet()); + } + + protected MongoClientSettings.Builder getMongoClientSettingsBuilder() { + commandListener.reset(); + return Fixture.getMongoClientSettingsBuilder() + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .readPreference(ReadPreference.primary()) + .addCommandListener(commandListener); + } + + @BeforeEach + public void setUp() { + namespace = generateNamespace(); + gridFsFileNamespace = new MongoNamespace(getDefaultDatabaseName(), GRID_FS_BUCKET_NAME + ".files"); + gridFsChunksNamespace = new MongoNamespace(getDefaultDatabaseName(), GRID_FS_BUCKET_NAME + ".chunks"); + + collectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), namespace); + 
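// separate helpers for the GridFS files and chunks collections, so the GridFS tests can create and drop them independently of the main test collection
+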
filesCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), gridFsFileNamespace); + chunksCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), gridFsChunksNamespace); + commandListener = new TestCommandListener(); + } + + @AfterEach + public void tearDown() { + ClusterFixture.disableFailPoint(FAIL_COMMAND_NAME); + if (collectionHelper != null) { + collectionHelper.drop(); + filesCollectionHelper.drop(); + chunksCollectionHelper.drop(); + commandListener.reset(); + try { + ServerHelper.checkPool(getPrimary()); + } catch (InterruptedException e) { + // ignore + } + } + } + + @AfterAll + public static void finalTearDown() { + CollectionHelper.dropDatabase(getDefaultDatabaseName()); + } + + private MongoClient createMongoClient(final MongoClientSettings.Builder builder) { + return createMongoClient(builder.build()); + } + + private long msElapsedSince(final long t1) { + return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t1); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java index 894d291a743..0aa2ff28536 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java @@ -18,7 +18,7 @@ import com.mongodb.ConnectionString; import com.mongodb.MongoClientSettings; import com.mongodb.ServerAddress; -import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandStartedEvent; import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonArray; import org.bson.BsonBoolean; @@ -133,7 +133,7 @@ private static Map doSelections(final MongoCollection result : results) { result.get(); } - List commandStartedEvents = commandListener.getCommandStartedEvents(); + List commandStartedEvents = commandListener.getCommandStartedEvents(); assertEquals(tasks * opsPerTask, commandStartedEvents.size()); return commandStartedEvents.stream() .collect(groupingBy(event -> event.getConnectionDescription().getServerAddress())) diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java index e6c9b66d1b1..e927192ac8d 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java @@ -41,6 +41,7 @@ protected MongoDatabase getDatabase(final String databaseName) { @After public void cleanUp() { + super.cleanUp(); if (mongoClient != null) { mongoClient.close(); } diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java new file mode 100644 index 00000000000..fc80e2f1139 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSBuckets; + + +/** + * See https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.rst#prose-tests + */ +public final class ClientSideOperationTimeoutProseTest extends AbstractClientSideOperationsTimeoutProseTest { + + @Override + protected MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) { + return MongoClients.create(mongoClientSettings); + } + + @Override + protected GridFSBucket createGridFsBucket(final MongoDatabase mongoDatabase, final String bucketName) { + return GridFSBuckets.create(mongoDatabase, bucketName); + } + + @Override + protected boolean isAsync() { + return false; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java new file mode 100644 index 00000000000..c4068375f9f --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.client.unified.UnifiedSyncTest; +import org.junit.jupiter.params.provider.Arguments; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.Collection; + +import static org.junit.jupiter.api.Assumptions.assumeFalse; + + +// See https://github.com/mongodb/specifications/tree/master/source/client-side-operation-timeout/tests +public class ClientSideOperationTimeoutTest extends UnifiedSyncTest { + + private static Collection data() throws URISyntaxException, IOException { + return getTestData("unified-test-format/client-side-operation-timeout"); + } + + @Override + protected void skips(final String fileDescription, final String testDescription) { + skipOperationTimeoutTests(fileDescription, testDescription); + } + + public static void skipOperationTimeoutTests(final String fileDescription, final String testDescription) { + + if (ClusterFixture.isServerlessTest()) { + + // It is not possible to create capped collections on serverless instances. 
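+ // Tailable cursors require capped collections, which is why the two tailable-cursor test files are skipped here.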
+ assumeFalse(fileDescription.equals("timeoutMS behaves correctly for tailable awaitData cursors")); + assumeFalse(fileDescription.equals("timeoutMS behaves correctly for tailable non-awaitData cursors")); + + /* Drivers MUST NOT execute a killCursors command because the pinned connection is no longer under a load balancer. */ + assumeFalse(testDescription.equals("timeoutMS is refreshed for close")); + + /* Flaky tests. We have to retry them once we have a Junit5 rule. */ + assumeFalse(testDescription.equals("remaining timeoutMS applied to getMore if timeoutMode is unset")); + assumeFalse(testDescription.equals("remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime")); + assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if timeoutMode is iteration - success")); + assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if timeoutMode is iteration - failure")); + } + assumeFalse(testDescription.contains("maxTimeMS is ignored if timeoutMS is set - createIndex on collection"), + "No maxTimeMS parameter for createIndex() method"); + assumeFalse(fileDescription.startsWith("runCursorCommand"), "No run cursor command"); + assumeFalse(testDescription.contains("runCommand on database"), "No special handling of runCommand"); + assumeFalse(testDescription.endsWith("count on collection"), "No count command helper"); + assumeFalse(fileDescription.equals("timeoutMS can be overridden for an operation"), "No operation based overrides"); + assumeFalse(testDescription.equals("timeoutMS can be overridden for commitTransaction") + || testDescription.equals("timeoutMS applied to abortTransaction"), + "No operation session based overrides"); + assumeFalse(fileDescription.equals("timeoutMS behaves correctly when closing cursors") + && testDescription.equals("timeoutMS can be overridden for close"), "No operation based overrides"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java index 4ab1d179611..cea89765756 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java @@ -17,7 +17,6 @@ package com.mongodb.client; import com.mongodb.ReadConcern; -import com.mongodb.event.CommandEvent; import com.mongodb.event.CommandStartedEvent; import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonDocument; @@ -60,7 +59,7 @@ public void shouldIncludeReadConcernInCommand() { mongoClient.getDatabase(getDefaultDatabaseName()).getCollection("test") .withReadConcern(ReadConcern.LOCAL).find().into(new ArrayList<>()); - List events = commandListener.getCommandStartedEvents(); + List events = commandListener.getCommandStartedEvents(); BsonDocument commandDocument = new BsonDocument("find", new BsonString("test")) .append("readConcern", ReadConcern.LOCAL.asDocument()) diff --git a/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java b/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java index 4b7dc8d9310..cf8c3bfc292 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java @@ -26,9 +26,10 @@ import com.mongodb.event.ServerHeartbeatSucceededEvent; import com.mongodb.event.ServerListener; import 
com.mongodb.event.ServerMonitorListener; -import com.mongodb.internal.time.Timeout; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.TimePointTest; +import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; @@ -56,6 +57,7 @@ import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singleton; @@ -267,21 +269,14 @@ public void monitorsSleepAtLeastMinHeartbeatFreqencyMSBetweenChecks() { private static void assertPoll(final BlockingQueue queue, @Nullable final Class allowed, final Set> required) throws InterruptedException { - assertPoll(queue, allowed, required, Timeout.startNow(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS)); + assertPoll(queue, allowed, required, Timeout.expiresIn(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS, ZERO_DURATION_MEANS_EXPIRED)); } private static void assertPoll(final BlockingQueue queue, @Nullable final Class allowed, final Set> required, final Timeout timeout) throws InterruptedException { Set> encountered = new HashSet<>(); while (true) { - Object element; - if (timeout.isImmediate()) { - element = queue.poll(); - } else if (timeout.isInfinite()) { - element = queue.take(); - } else { - element = queue.poll(timeout.remaining(NANOSECONDS), NANOSECONDS); - } + Object element = poll(queue, timeout); if (element != null) { if (LOGGER.isInfoEnabled()) { LOGGER.info("Polled " + element); @@ -299,12 +294,29 @@ private static void assertPoll(final BlockingQueue queue, @Nullable final Cla return; } } - if (timeout.expired()) { + if (TimePointTest.hasExpired(timeout)) { fail(format("encountered %s, required %s", encountered, required)); } } } + @Nullable + private static Object poll(final BlockingQueue queue, final Timeout timeout) throws InterruptedException { + long remainingNs = timeout.call(NANOSECONDS, + () -> -1L, + (ns) -> ns, + () -> 0L); + Object element; + if (remainingNs == -1) { + element = queue.take(); + } else if (remainingNs == 0) { + element = queue.poll(); + } else { + element = queue.poll(remainingNs, NANOSECONDS); + } + return element; + } + private static Optional> findAssignable(final Class from, final Set> toAnyOf) { return toAnyOf.stream().filter(to -> to.isAssignableFrom(from)).findAny(); } diff --git a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java index f9093dc4ae5..b09edf4ac43 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java @@ -16,17 +16,25 @@ package com.mongodb.client; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientException; import com.mongodb.MongoException; +import com.mongodb.TransactionOptions; import com.mongodb.client.internal.ClientSessionClock; +import com.mongodb.client.model.Sorts; import org.bson.Document; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.concurrent.TimeUnit; 
+ +import static com.mongodb.ClusterFixture.TIMEOUT; import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.isServerlessTest; import static com.mongodb.ClusterFixture.isSharded; import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeFalse; @@ -162,6 +170,43 @@ public void testRetryTimeoutEnforcedTransientTransactionErrorOnCommit() { } } + // + // Ensure cannot override timeout in transaction + // + @Test + public void testTimeoutMS() { + try (ClientSession session = client.startSession(ClientSessionOptions.builder() + .defaultTransactionOptions(TransactionOptions.builder().timeout(TIMEOUT, TimeUnit.SECONDS).build()) + .build())) { + assertThrows(MongoClientException.class, () -> session.withTransaction(() -> { + collection.insertOne(session, Document.parse("{ _id : 1 }")); + collection.withTimeout(2, TimeUnit.MINUTES).find(session).first(); + return -1; + })); + } + } + + // + // Ensure legacy settings don't cause issues in sessions + // + @Test + public void testTimeoutMSAndLegacySettings() { + try (ClientSession session = client.startSession(ClientSessionOptions.builder() + .defaultTransactionOptions(TransactionOptions.builder().timeout(TIMEOUT, TimeUnit.SECONDS).build()) + .build())) { + Document document = Document.parse("{ _id : 1 }"); + Document returnValueFromCallback = session.withTransaction(() -> { + collection.insertOne(session, document); + Document found = collection.find(session) + .maxAwaitTime(1, TimeUnit.MINUTES) + .sort(Sorts.descending("_id")) + .first(); + return found != null ? found : new Document(); + }); + assertEquals(document, returnValueFromCallback); + } + } + private boolean canRunTests() { if (isSharded()) { return serverVersionAtLeast(4, 2); diff --git a/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java new file mode 100644 index 00000000000..31f72ca4332 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java @@ -0,0 +1,388 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.csot; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoUpdatedEncryptedFieldsException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.Fixture; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.applyTimeoutMultiplierForServerless; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.lessThan; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * See + * Prose Tests. 
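+ * These are the client-side operations timeout (CSOT) prose tests that involve client-side encryption.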
+ */ +public abstract class AbstractClientSideOperationsEncryptionTimeoutProseTest { + + protected static final String FAIL_COMMAND_NAME = "failCommand"; + private static final Map> KMS_PROVIDERS = new HashMap<>(); + + private final MongoNamespace keyVaultNamespace = new MongoNamespace("keyvault", "datakeys"); + + private CollectionHelper keyVaultCollectionHelper; + + private TestCommandListener commandListener; + + private static final String MASTER_KEY = "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5a" + + "XRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"; + + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings.Builder builder); + + protected abstract MongoClient createMongoClient(MongoClientSettings.Builder builder); + + @Test + void shouldThrowOperationTimeoutExceptionWhenCreateDataKey() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + Map> kmsProviders = new HashMap<>(); + Map localProviderMap = new HashMap<>(); + localProviderMap.put("key", Base64.getDecoder().decode(MASTER_KEY)); + kmsProviders.put("local", localProviderMap); + + try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 100))) { + + keyVaultCollectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 100) + + " }" + + "}"); + + assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.createDataKey("local")); + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertEquals(1, commandStartedEvents.size()); + assertEquals(keyVaultNamespace.getCollectionName(), + commandStartedEvents.get(0).getCommand().get("insert").asString().getValue()); + assertNotNull(commandListener.getCommandFailedEvent("insert")); + } + + } + + @Test + void shouldThrowOperationTimeoutExceptionWhenEncryptData() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 150))) { + + clientEncryption.createDataKey("local"); + + keyVaultCollectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"find\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 150) + + " }" + + "}"); + + BsonBinary dataKey = clientEncryption.createDataKey("local"); + + EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"); + encryptOptions.keyId(dataKey); + commandListener.reset(); + assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.encrypt(new BsonString("hello"), encryptOptions)); + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertEquals(1, commandStartedEvents.size()); + assertEquals(keyVaultNamespace.getCollectionName(), commandStartedEvents.get(0).getCommand().get("find").asString().getValue()); + assertNotNull(commandListener.getCommandFailedEvent("find")); + } + + } + + @Test + void shouldThrowOperationTimeoutExceptionWhenDecryptData() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + BsonBinary encrypted; + try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 
400))) { + clientEncryption.createDataKey("local"); + BsonBinary dataKey = clientEncryption.createDataKey("local"); + EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"); + encryptOptions.keyId(dataKey); + encrypted = clientEncryption.encrypt(new BsonString("hello"), encryptOptions); + } + + try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 400))) { + keyVaultCollectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"find\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 500) + + " }" + + "}"); + commandListener.reset(); + assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.decrypt(encrypted)); + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertEquals(1, commandStartedEvents.size()); + assertEquals(keyVaultNamespace.getCollectionName(), commandStartedEvents.get(0).getCommand().get("find").asString().getValue()); + assertNotNull(commandListener.getCommandFailedEvent("find")); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. + */ + @Test + void shouldDecreaseOperationTimeoutForSubsequentOperations() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + long initialTimeoutMS = rtt + 2500; + + keyVaultCollectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: \"alwaysOn\"," + + " data: {" + + " failCommands: [\"insert\", \"find\", \"listCollections\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 10) + + " }" + + "}"); + + try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder() + .timeout(initialTimeoutMS, MILLISECONDS))) { + BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions()); + String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeyId.getData()); + + final String dbName = "test"; + final String collName = "coll"; + + AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder() + .keyVaultNamespace(keyVaultNamespace.getFullName()) + .keyVaultMongoClientSettings(getMongoClientSettingsBuilder() + .build()) + .kmsProviders(KMS_PROVIDERS) + .build(); + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .timeout(initialTimeoutMS, MILLISECONDS))) { + + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions(); + createCollectionOptions.validationOptions(new ValidationOptions() + .validator(new BsonDocument("$jsonSchema", BsonDocument.parse("{" + + " properties: {" + + " encryptedField: {" + + " encrypt: {" + + " keyId: [{" + + " \"$binary\": {" + + " \"base64\": \"" + base64DataKeyId + "\"," + + " \"subType\": \"04\"" + + " }" + + " }]," + + " bsonType: \"string\"," + + " algorithm: \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\"" + + " }" + + " }" + + " }," + + " \"bsonType\": \"object\"" + + "}")))); + + MongoCollection collection = mongoClient.getDatabase(dbName).getCollection(collName); + collection.drop(); + + mongoClient.getDatabase(dbName).createCollection(collName, createCollectionOptions); + + commandListener.reset(); + collection.insertOne(new Document("encryptedField", "123456789")); + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + 
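// each successive command (listCollections, find, insert) should carry a smaller maxTimeMS as the shared timeout budget is consumed
+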
assertTimeoutIsDecreasingForCommands(Arrays.asList("listCollections", "find", "insert"), commandStartedEvents, + initialTimeoutMS); + } + } + } + + /** + * Not a prose spec test. However, it is an additional test case for better coverage. + */ + @ParameterizedTest + @ValueSource(strings = {"insert", "create"}) + void shouldThrowTimeoutExceptionWhenCreateEncryptedCollection(final String commandToTimeout) { + assumeTrue(serverVersionAtLeast(7, 0)); + //given + long rtt = ClusterFixture.getPrimaryRTT(); + long initialTimeoutMS = rtt + applyTimeoutMultiplierForServerless(200); + + try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder() + .timeout(initialTimeoutMS, MILLISECONDS))) { + final String dbName = "test"; + final String collName = "coll"; + + try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder() + .timeout(initialTimeoutMS, MILLISECONDS))) { + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions().encryptedFields(Document.parse( + "{" + + " fields: [{" + + " path: 'ssn'," + + " bsonType: 'string'," + + " keyId: null" + + " }]" + + "}")); + + keyVaultCollectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"" + commandToTimeout + "\"]," + + " blockConnection: true," + + " blockTimeMS: " + initialTimeoutMS + + " }" + + "}"); + + MongoDatabase database = mongoClient.getDatabase(dbName); + database.getCollection(collName).drop(); + commandListener.reset(); + + //when + MongoUpdatedEncryptedFieldsException encryptionException = assertThrows(MongoUpdatedEncryptedFieldsException.class, () -> + clientEncryption.createEncryptedCollection(database, collName, createCollectionOptions, + new CreateEncryptedCollectionParams("local"))); + //then + assertInstanceOf(MongoOperationTimeoutException.class, encryptionException.getCause()); + } + } + } + + private static void assertTimeoutIsDecreasingForCommands(final List commandNames, + final List commandStartedEvents, + final long initialTimeoutMs) { + long previousMaxTimeMS = initialTimeoutMs; + assertEquals(commandNames.size(), commandStartedEvents.size(), "There have been more commands than expected"); + for (int i = 0; i < commandStartedEvents.size(); i++) { + CommandStartedEvent commandStartedEvent = commandStartedEvents.get(i); + String expectedCommandName = commandNames.get(i); + assertEquals(expectedCommandName, commandStartedEvent.getCommandName()); + + BsonDocument command = commandStartedEvent.getCommand(); + assertTrue(command.containsKey("maxTimeMS"), "Command " + expectedCommandName + " should have maxTimeMS set"); + + long maxTimeMS = command.getInt64("maxTimeMS").getValue(); + + if (i > 0) { + assertThat(commandStartedEvent.getCommandName() + " " + "maxTimeMS should be less than that of a previous " + + commandStartedEvents.get(i - 1).getCommandName() + " command", maxTimeMS, lessThan(previousMaxTimeMS)); + } else { + assertThat("maxTimeMS should be less than the configured timeout " + initialTimeoutMs + "ms", + maxTimeMS, lessThan(previousMaxTimeMS)); + } + previousMaxTimeMS = maxTimeMS; + } + } + + protected ClientEncryptionSettings.Builder getClientEncryptionSettingsBuilder(final long vaultTimeout) { + return ClientEncryptionSettings + .builder() + .keyVaultNamespace(keyVaultNamespace.getFullName()) + .keyVaultMongoClientSettings(getMongoClientSettingsBuilder() + .timeout(vaultTimeout, TimeUnit.MILLISECONDS).build()) + 
.kmsProviders(KMS_PROVIDERS); + } + + protected ClientEncryptionSettings.Builder getClientEncryptionSettingsBuilder() { + return ClientEncryptionSettings + .builder() + .keyVaultNamespace(keyVaultNamespace.getFullName()) + .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build()) + .kmsProviders(KMS_PROVIDERS); + } + + protected MongoClientSettings.Builder getMongoClientSettingsBuilder() { + return Fixture.getMongoClientSettingsBuilder() + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .readPreference(ReadPreference.primary()) + .addCommandListener(commandListener); + } + + @BeforeEach + public void setUp() { + Map localProviderMap = new HashMap<>(); + localProviderMap.put("key", Base64.getDecoder().decode(MASTER_KEY)); + KMS_PROVIDERS.put("local", localProviderMap); + + keyVaultCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), keyVaultNamespace); + keyVaultCollectionHelper.create(); + commandListener = new TestCommandListener(); + } + + @AfterEach + public void tearDown() { + ClusterFixture.disableFailPoint(FAIL_COMMAND_NAME); + if (keyVaultCollectionHelper != null) { + keyVaultCollectionHelper.drop(); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java new file mode 100644 index 00000000000..25a1102914a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.csot; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; + +public class ClientSideOperationsEncryptionTimeoutProseTest extends AbstractClientSideOperationsEncryptionTimeoutProseTest { + public ClientEncryption createClientEncryption(final ClientEncryptionSettings.Builder builder) { + return ClientEncryptions.create(builder.build()); + } + + @Override + protected MongoClient createMongoClient(final MongoClientSettings.Builder builder) { + return MongoClients.create(builder.build()); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java index f3aef9ec257..1890b2e48a3 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java @@ -27,9 +27,9 @@ import com.mongodb.ServerApi; import com.mongodb.ServerApiVersion; import com.mongodb.TransactionOptions; -import com.mongodb.WriteConcern; import com.mongodb.client.ClientSession; import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoCursor; import com.mongodb.client.MongoDatabase; @@ -64,6 +64,7 @@ import com.mongodb.internal.connection.TestServerListener; import com.mongodb.internal.logging.LogMessage; import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; import com.mongodb.logging.TestLoggingInterceptor; import org.bson.BsonArray; import org.bson.BsonBoolean; @@ -261,6 +262,18 @@ public MongoCollection getCollection(final String id) { return getEntity(id, collections, "collection"); } + public MongoCluster getMongoClusterWithTimeoutMS(final String id, @Nullable final Long timeoutMS) { + return timeoutMS != null ? getClient(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getClient(id); + } + + public MongoDatabase getDatabaseWithTimeoutMS(final String id, @Nullable final Long timeoutMS) { + return timeoutMS != null ? getDatabase(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getDatabase(id); + } + + public MongoCollection getCollectionWithTimeoutMS(final String id, @Nullable final Long timeoutMS) { + return timeoutMS != null ? 
getCollection(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getCollection(id); + } + public ClientSession getSession(final String id) { return getEntity(id, sessions, "session"); } @@ -471,11 +484,17 @@ private void initClient(final BsonDocument entity, final String id, break; case "w": if (value.isString()) { - clientSettingsBuilder.writeConcern(new WriteConcern(value.asString().getValue())); + clientSettingsBuilder.writeConcern(clientSettingsBuilder.build() + .getWriteConcern().withW(value.asString().getValue())); } else { - clientSettingsBuilder.writeConcern(new WriteConcern(value.asInt32().intValue())); + clientSettingsBuilder.writeConcern(clientSettingsBuilder.build() + .getWriteConcern().withW(value.asInt32().intValue())); } break; + case "wTimeoutMS": + clientSettingsBuilder.writeConcern(clientSettingsBuilder.build().getWriteConcern() + .withWTimeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS)); + break; case "maxPoolSize": clientSettingsBuilder.applyToConnectionPoolSettings(builder -> builder.maxSize(value.asNumber().intValue())); break; @@ -519,6 +538,9 @@ private void initClient(final BsonDocument entity, final String id, case "appName": clientSettingsBuilder.applicationName(value.asString().getValue()); break; + case "timeoutMS": + clientSettingsBuilder.timeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS); + break; case "serverMonitoringMode": clientSettingsBuilder.applyToServerSettings(builder -> builder.serverMonitoringMode( ServerMonitoringModeUtil.fromString(value.asString().getValue()))); @@ -631,6 +653,9 @@ private void initDatabase(final BsonDocument entity, final String id) { case "writeConcern": database = database.withWriteConcern(asWriteConcern(entry.getValue().asDocument())); break; + case "timeoutMS": + database = database.withTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; default: throw new UnsupportedOperationException("Unsupported database option: " + entry.getKey()); } @@ -655,6 +680,9 @@ private void initCollection(final BsonDocument entity, final String id) { case "writeConcern": collection = collection.withWriteConcern(asWriteConcern(entry.getValue().asDocument())); break; + case "timeoutMS": + collection = collection.withTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; default: throw new UnsupportedOperationException("Unsupported collection option: " + entry.getKey()); } @@ -675,6 +703,9 @@ private void initSession(final BsonDocument entity, final String id, final BsonD case "snapshot": optionsBuilder.snapshot(entry.getValue().asBoolean().getValue()); break; + case "defaultTimeoutMS": + optionsBuilder.defaultTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; case "causalConsistency": optionsBuilder.causallyConsistent(entry.getValue().asBoolean().getValue()); break; diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java index 7c0d340a9ad..75d264487f8 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java @@ -22,6 +22,7 @@ import com.mongodb.MongoException; import com.mongodb.MongoSecurityException; import com.mongodb.MongoExecutionTimeoutException; +import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoServerException; import com.mongodb.MongoSocketException; import 
com.mongodb.MongoWriteConcernException;
@@ -42,7 +43,7 @@
 final class ErrorMatcher {
     private static final Set<String> EXPECTED_ERROR_FIELDS = new HashSet<>(
             asList("isError", "expectError", "isClientError", "errorCode", "errorCodeName", "errorContains", "errorResponse",
-                    "isClientError", "errorLabelsOmit", "errorLabelsContain", "expectResult"));
+                    "isClientError", "isTimeoutError", "errorLabelsOmit", "errorLabelsContain", "expectResult"));
 
     private final AssertionContext context;
     private final ValueMatcher valueMatcher;
@@ -68,6 +69,14 @@ void assertErrorsMatch(final BsonDocument expectedError, final Exception e) {
                     e instanceof MongoClientException || e instanceof IllegalArgumentException || e instanceof IllegalStateException
                             || e instanceof MongoSocketException);
         }
+
+        if (expectedError.containsKey("isTimeoutError")) {
+            assertEquals(context.getMessage("Exception must be of type MongoOperationTimeoutException when checking for results"),
+                    expectedError.getBoolean("isTimeoutError").getValue(),
+                    e instanceof MongoOperationTimeoutException
+            );
+        }
+
         if (expectedError.containsKey("errorContains")) {
             String errorContains = expectedError.getString("errorContains").getValue();
             assertTrue(context.getMessage("Error message does not contain expected string: " + errorContains),
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java
index 63e07ca2fb2..67f95903997 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java
@@ -16,6 +16,7 @@
 
 package com.mongodb.client.unified;
 
+import com.mongodb.CursorType;
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadConcernLevel;
@@ -37,11 +38,12 @@
 import com.mongodb.client.ListIndexesIterable;
 import com.mongodb.client.ListSearchIndexesIterable;
 import com.mongodb.client.MongoChangeStreamCursor;
-import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCluster;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoCursor;
 import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.MongoIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.BulkWriteOptions;
 import com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions;
 import com.mongodb.client.model.ClusteredIndexOptions;
@@ -52,6 +54,7 @@
 import com.mongodb.client.model.DeleteManyModel;
 import com.mongodb.client.model.DeleteOneModel;
 import com.mongodb.client.model.DeleteOptions;
+import com.mongodb.client.model.DropIndexOptions;
 import com.mongodb.client.model.EstimatedDocumentCountOptions;
 import com.mongodb.client.model.FindOneAndDeleteOptions;
 import com.mongodb.client.model.FindOneAndReplaceOptions;
@@ -93,6 +96,8 @@
 import org.bson.codecs.ValueCodecProvider;
 import org.bson.codecs.configuration.CodecRegistries;
 
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -106,7 +111,8 @@
 import static java.util.Objects.requireNonNull;
 import static java.util.stream.Collectors.toList;
 
-final class UnifiedCrudHelper {
+@SuppressWarnings("deprecation")
+final class UnifiedCrudHelper extends UnifiedHelper {
     private final Entities entities;
     private final String testDescription;
     private final AtomicInteger uniqueIdGenerator = new AtomicInteger();
@@
-217,13 +223,13 @@ private ClientSession getSession(final BsonDocument arguments) { OperationResult executeListDatabases(final BsonDocument operation) { - MongoClient client = entities.getClient(operation.getString("object").getValue()); + MongoCluster mongoCluster = getMongoCluster(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); ListDatabasesIterable iterable = session == null - ? client.listDatabases(BsonDocument.class) - : client.listDatabases(session, BsonDocument.class); + ? mongoCluster.listDatabases(BsonDocument.class) + : mongoCluster.listDatabases(session, BsonDocument.class); for (Map.Entry cur : arguments.entrySet()) { switch (cur.getKey()) { @@ -242,13 +248,13 @@ OperationResult executeListDatabases(final BsonDocument operation) { } OperationResult executeListDatabaseNames(final BsonDocument operation) { - MongoClient client = entities.getClient(operation.getString("object").getValue()); + MongoCluster mongoCluster = getMongoCluster(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); MongoIterable iterable = session == null - ? client.listDatabaseNames() - : client.listDatabaseNames(session); + ? mongoCluster.listDatabaseNames() + : mongoCluster.listDatabaseNames(session); for (Map.Entry cur : arguments.entrySet()) { //noinspection SwitchStatementWithTooFewBranches @@ -265,34 +271,40 @@ OperationResult executeListDatabaseNames(final BsonDocument operation) { } OperationResult executeListCollections(final BsonDocument operation) { - MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); - + MongoDatabase database = getMongoDatabase(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); ListCollectionsIterable iterable = session == null ? 
database.listCollections(BsonDocument.class) : database.listCollections(session, BsonDocument.class); - for (Map.Entry cur : arguments.entrySet()) { - switch (cur.getKey()) { - case "session": - break; - case "filter": - iterable.filter(cur.getValue().asDocument()); - break; - case "batchSize": - iterable.batchSize(cur.getValue().asNumber().intValue()); - break; - default: - throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + break; + case "filter": + iterable.filter(cur.getValue().asDocument()); + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } } - } - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()))); + return new BsonArray(iterable.into(new ArrayList<>())); + }); } OperationResult executeListCollectionNames(final BsonDocument operation) { - MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + MongoDatabase database = getMongoDatabase(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); @@ -322,21 +334,21 @@ OperationResult executeListCollectionNames(final BsonDocument operation) { } OperationResult executeListIndexes(final BsonDocument operation) { - ListIndexesIterable iterable = createListIndexesIterable(operation); - - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()))); + return resultOf(() -> { + ListIndexesIterable iterable = createListIndexesIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>())); + }); } OperationResult executeListIndexNames(final BsonDocument operation) { - ListIndexesIterable iterable = createListIndexesIterable(operation); - - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList()))); + return resultOf(() -> { + ListIndexesIterable iterable = createListIndexesIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList())); + }); } private ListIndexesIterable createListIndexesIterable(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); ListIndexesIterable iterable = session == null @@ -349,6 +361,12 @@ private ListIndexesIterable createListIndexesIterable(final BsonDo case "batchSize": iterable.batchSize(cur.getValue().asNumber().intValue()); break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; default: throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); } @@ -357,19 +375,19 @@ private ListIndexesIterable createListIndexesIterable(final BsonDo } OperationResult executeFind(final BsonDocument operation) { - FindIterable 
iterable = createFindIterable(operation); - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()))); + return resultOf(() -> { + FindIterable iterable = createFindIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>())); + }); } OperationResult executeFindOne(final BsonDocument operation) { - FindIterable iterable = createFindIterable(operation); - return resultOf(iterable::first); + return resultOf(() -> createFindIterable(operation).first()); } OperationResult createFindCursor(final BsonDocument operation) { - FindIterable iterable = createFindIterable(operation); return resultOf(() -> { + FindIterable iterable = createFindIterable(operation); entities.addCursor(operation.getString("saveResultAsEntity", new BsonString(createRandomEntityId())).getValue(), iterable.cursor()); return null; @@ -378,7 +396,7 @@ OperationResult createFindCursor(final BsonDocument operation) { @NonNull private FindIterable createFindIterable(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument filter = arguments.getDocument("filter"); @@ -400,6 +418,9 @@ private FindIterable createFindIterable(final BsonDocument operati case "maxTimeMS": iterable.maxTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; case "skip": iterable.skip(cur.getValue().asInt32().intValue()); break; @@ -437,6 +458,12 @@ private FindIterable createFindIterable(final BsonDocument operati case "showRecordId": iterable.showRecordId(cur.getValue().asBoolean().getValue()); break; + case "cursorType": + setCursorType(iterable, cur); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; default: throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); } @@ -444,8 +471,9 @@ private FindIterable createFindIterable(final BsonDocument operati return iterable; } + @SuppressWarnings("deprecation") //maxTimeMS OperationResult executeDistinct(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); @@ -465,6 +493,9 @@ OperationResult executeDistinct(final BsonDocument operation) { case "filter": iterable.filter(cur.getValue().asDocument()); break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; case "collation": iterable.collation(asCollation(cur.getValue().asDocument())); break; @@ -479,8 +510,8 @@ OperationResult executeDistinct(final BsonDocument operation) { @SuppressWarnings("deprecation") OperationResult executeMapReduce(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); - BsonDocument arguments = operation.getDocument("arguments"); + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); String mapFunction = 
arguments.get("map").asJavaScript().getCode(); @@ -509,8 +540,9 @@ OperationResult executeMapReduce(final BsonDocument operation) { new BsonArray(iterable.into(new ArrayList<>()))); } + @SuppressWarnings("deprecation") //maxTimeMS OperationResult executeFindOneAndUpdate(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument filter = arguments.getDocument("filter").asDocument(); @@ -558,6 +590,9 @@ OperationResult executeFindOneAndUpdate(final BsonDocument operation) { case "let": options.let(cur.getValue().asDocument()); break; + case "maxTimeMS": + options.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; case "collation": options.collation(asCollation(cur.getValue().asDocument())); break; @@ -585,8 +620,9 @@ OperationResult executeFindOneAndUpdate(final BsonDocument operation) { }); } + @SuppressWarnings("deprecation") OperationResult executeFindOneAndReplace(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument filter = arguments.getDocument("filter").asDocument(); @@ -633,6 +669,9 @@ OperationResult executeFindOneAndReplace(final BsonDocument operation) { case "let": options.let(cur.getValue().asDocument()); break; + case "maxTimeMS": + options.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; case "collation": options.collation(asCollation(cur.getValue().asDocument())); break; @@ -650,8 +689,9 @@ OperationResult executeFindOneAndReplace(final BsonDocument operation) { }); } + @SuppressWarnings("deprecation") //maxTimeMS OperationResult executeFindOneAndDelete(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument filter = arguments.getDocument("filter").asDocument(); @@ -684,6 +724,9 @@ OperationResult executeFindOneAndDelete(final BsonDocument operation) { case "let": options.let(cur.getValue().asDocument()); break; + case "maxTimeMS": + options.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; default: throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); } @@ -700,53 +743,61 @@ OperationResult executeFindOneAndDelete(final BsonDocument operation) { OperationResult executeAggregate(final BsonDocument operation) { String entityName = operation.getString("object").getValue(); - BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); List pipeline = arguments.getArray("pipeline").stream().map(BsonValue::asDocument).collect(toList()); AggregateIterable iterable; if (entities.hasDatabase(entityName)) { + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments")); + MongoDatabase database = entities.getDatabaseWithTimeoutMS(entityName, timeoutMS); iterable = session == null - ? 
entities.getDatabase(entityName).aggregate(requireNonNull(pipeline), BsonDocument.class) - : entities.getDatabase(entityName).aggregate(session, requireNonNull(pipeline), BsonDocument.class); + ? database.aggregate(requireNonNull(pipeline), BsonDocument.class) + : database.aggregate(session, requireNonNull(pipeline), BsonDocument.class); } else if (entities.hasCollection(entityName)) { + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments")); + MongoCollection collection = entities.getCollectionWithTimeoutMS(entityName, timeoutMS); iterable = session == null - ? entities.getCollection(entityName).aggregate(requireNonNull(pipeline)) - : entities.getCollection(entityName).aggregate(session, requireNonNull(pipeline)); + ? collection.aggregate(requireNonNull(pipeline)) + : collection.aggregate(session, requireNonNull(pipeline)); } else { throw new UnsupportedOperationException("Unsupported entity type with name: " + entityName); } - for (Map.Entry cur : arguments.entrySet()) { - switch (cur.getKey()) { - case "pipeline": - case "session": - break; - case "batchSize": - iterable.batchSize(cur.getValue().asNumber().intValue()); - break; - case "allowDiskUse": - iterable.allowDiskUse(cur.getValue().asBoolean().getValue()); - break; - case "let": - iterable.let(cur.getValue().asDocument()); - break; - case "comment": - iterable.comment(cur.getValue()); - break; - case "maxTimeMS": - iterable.maxTime(cur.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS); - break; - case "collation": - iterable.collation(asCollation(cur.getValue().asDocument())); - break; - default: - throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); - } - } - String lastStageName = pipeline.isEmpty() ? null : pipeline.get(pipeline.size() - 1).getFirstKey(); - boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge"); - return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "pipeline": + case "session": + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "allowDiskUse": + iterable.allowDiskUse(cur.getValue().asBoolean().getValue()); + break; + case "let": + iterable.let(cur.getValue().asDocument()); + break; + case "collation": + iterable.collation(asCollation(cur.getValue().asDocument())); + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + String lastStageName = pipeline.isEmpty() ? 
null : pipeline.get(pipeline.size() - 1).getFirstKey(); + boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge"); if (!pipeline.isEmpty() && useToCollection) { iterable.toCollection(); return null; @@ -757,7 +808,7 @@ OperationResult executeAggregate(final BsonDocument operation) { } OperationResult executeDeleteOne(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument filter = arguments.getDocument("filter"); ClientSession session = getSession(arguments); @@ -773,7 +824,7 @@ OperationResult executeDeleteOne(final BsonDocument operation) { } OperationResult executeDeleteMany(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument filter = arguments.getDocument("filter"); ClientSession session = getSession(arguments); @@ -797,7 +848,7 @@ private BsonDocument toExpected(final DeleteResult result) { } OperationResult executeUpdateOne(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument filter = arguments.getDocument("filter"); @@ -821,7 +872,7 @@ OperationResult executeUpdateOne(final BsonDocument operation) { } OperationResult executeUpdateMany(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument filter = arguments.getDocument("filter"); BsonValue update = arguments.get("update"); @@ -844,7 +895,7 @@ OperationResult executeUpdateMany(final BsonDocument operation) { } OperationResult executeReplaceOne(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument filter = arguments.getDocument("filter"); @@ -877,7 +928,7 @@ private BsonDocument toExpected(final UpdateResult result) { OperationResult executeInsertOne(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument document = arguments.getDocument("document").asDocument(); @@ -911,7 +962,7 @@ private BsonDocument toExpected(final InsertOneResult result) { } OperationResult executeInsertMany(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument 
arguments = operation.getDocument("arguments", new BsonDocument()); List documents = arguments.getArray("documents").stream().map(BsonValue::asDocument).collect(toList()); ClientSession session = getSession(arguments); @@ -952,7 +1003,7 @@ private BsonDocument toExpected(final InsertManyResult result) { } OperationResult executeBulkWrite(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); List> requests = arguments.getArray("requests").stream() @@ -1156,6 +1207,9 @@ OperationResult executeStartTransaction(final BsonDocument operation) { case "readConcern": optionsBuilder.readConcern(asReadConcern(cur.getValue().asDocument())); break; + case "timeoutMS": + optionsBuilder.timeout(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; case "maxCommitTimeMS": optionsBuilder.maxCommitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); break; @@ -1174,7 +1228,7 @@ OperationResult executeCommitTransaction(final BsonDocument operation) { ClientSession session = entities.getSession(operation.getString("object").getValue()); if (operation.containsKey("arguments")) { - throw new UnsupportedOperationException("Unexpected arguments"); + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); } return resultOf(() -> { @@ -1187,7 +1241,7 @@ OperationResult executeAbortTransaction(final BsonDocument operation) { ClientSession session = entities.getSession(operation.getString("object").getValue()); if (operation.containsKey("arguments")) { - throw new UnsupportedOperationException("Unexpected arguments"); + throw new UnsupportedOperationException("Unexpected arguments: " + operation.get("arguments")); } return resultOf(() -> { @@ -1210,6 +1264,9 @@ OperationResult executeWithTransaction(final BsonDocument operation, final Opera case "writeConcern": optionsBuilder.writeConcern(asWriteConcern(entry.getValue().asDocument())); break; + case "timeoutMS": + optionsBuilder.timeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; case "maxCommitTimeMS": optionsBuilder.maxCommitTime(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); break; @@ -1232,12 +1289,12 @@ OperationResult executeWithTransaction(final BsonDocument operation, final Opera } public OperationResult executeDropCollection(final BsonDocument operation) { - MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + MongoDatabase database = getMongoDatabase(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); String collectionName = arguments.getString("collection").getValue(); - if (operation.getDocument("arguments", new BsonDocument()).size() > 1) { - throw new UnsupportedOperationException("Unexpected arguments"); + if (operation.getDocument("arguments").size() > 1) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); } return resultOf(() -> { @@ -1247,7 +1304,7 @@ public OperationResult executeDropCollection(final BsonDocument operation) { } public OperationResult executeCreateCollection(final BsonDocument operation) { - MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + MongoDatabase database = 
getMongoDatabase(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); String collectionName = arguments.getString("collection").getValue(); ClientSession session = getSession(arguments); @@ -1317,7 +1374,7 @@ public OperationResult executeCreateCollection(final BsonDocument operation) { } public OperationResult executeModifyCollection(final BsonDocument operation) { - MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + MongoDatabase database = getMongoDatabase(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); String collectionName = arguments.getString("collection").getValue(); ClientSession session = getSession(arguments); @@ -1350,7 +1407,7 @@ public OperationResult executeModifyCollection(final BsonDocument operation) { } public OperationResult executeRenameCollection(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); String newCollectionName = arguments.getString("to").getValue(); ClientSession session = getSession(arguments); @@ -1448,7 +1505,7 @@ private TimeSeriesGranularity createTimeSeriesGranularity(final String value) { OperationResult executeCreateSearchIndex(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument model = arguments.getDocument("model"); BsonDocument definition = model.getDocument("definition"); @@ -1465,7 +1522,7 @@ OperationResult executeCreateSearchIndex(final BsonDocument operation) { } OperationResult executeCreateSearchIndexes(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonArray models = arguments.getArray("models"); @@ -1480,7 +1537,7 @@ OperationResult executeCreateSearchIndexes(final BsonDocument operation) { OperationResult executeUpdateSearchIndex(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument definition = arguments.getDocument("definition"); String name = arguments.getString("name").getValue(); @@ -1492,7 +1549,7 @@ OperationResult executeUpdateSearchIndex(final BsonDocument operation) { } OperationResult executeDropSearchIndex(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); String name = arguments.getString("name").getValue(); @@ -1516,7 +1573,7 @@ private static SearchIndexModel toIndexSearchModel(final BsonValue bsonValue) { OperationResult executeListSearchIndexes(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection 
collection = getMongoCollection(operation); Optional arguments = Optional.ofNullable(operation.getOrDefault("arguments", null)).map(BsonValue::asDocument); if (arguments.isPresent()) { @@ -1555,7 +1612,7 @@ private ListSearchIndexesIterable createListSearchIndexesIterable( } public OperationResult executeCreateIndex(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument keys = arguments.getDocument("keys").asDocument(); ClientSession session = getSession(arguments); @@ -1588,27 +1645,63 @@ public OperationResult executeCreateIndex(final BsonDocument operation) { } public OperationResult executeDropIndex(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); String indexName = arguments.get("name").asString().getValue(); + + if (!arguments.containsKey("name")) { + throw new UnsupportedOperationException("Drop index without name is not supported"); + } + + DropIndexOptions options = getDropIndexOptions(arguments); + return resultOf(() -> { + if (session == null) { + collection.dropIndex(indexName, options); + } else { + collection.dropIndex(session, indexName, options); + } + return null; + }); + } + + public OperationResult executeDropIndexes(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + + if (operation.containsKey("arguments")) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + DropIndexOptions options = getDropIndexOptions(arguments); + return resultOf(() -> { + if (session == null) { + collection.dropIndexes(options); + } else { + collection.dropIndexes(session, options); + } + return null; + }); + } + return resultOf(() -> { + collection.dropIndexes(); + return null; + }); + } + + private static DropIndexOptions getDropIndexOptions(final BsonDocument arguments) { + DropIndexOptions options = new DropIndexOptions(); for (Map.Entry cur : arguments.entrySet()) { switch (cur.getKey()) { case "session": case "name": break; + case "maxTimeMS": + options.maxTime(cur.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS); + break; default: throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); } } - return resultOf(() -> { - if (session == null) { - collection.dropIndex(indexName); - } else { - collection.dropIndex(session, indexName); - } - return null; - }); + return options; } public OperationResult createChangeStreamCursor(final BsonDocument operation) { @@ -1616,43 +1709,48 @@ public OperationResult createChangeStreamCursor(final BsonDocument operation) { BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); List pipeline = arguments.getArray("pipeline").stream().map(BsonValue::asDocument).collect(toList()); ChangeStreamIterable iterable; + + Long timeoutMS = arguments.containsKey("timeoutMS") ? 
arguments.remove("timeoutMS").asNumber().longValue() : null; if (entities.hasCollection(entityName)) { - iterable = entities.getCollection(entityName).watch(pipeline); + iterable = entities.getCollectionWithTimeoutMS(entityName, timeoutMS).watch(pipeline); } else if (entities.hasDatabase(entityName)) { - iterable = entities.getDatabase(entityName).watch(pipeline, BsonDocument.class); + iterable = entities.getDatabaseWithTimeoutMS(entityName, timeoutMS).watch(pipeline, BsonDocument.class); } else if (entities.hasClient(entityName)) { - iterable = entities.getClient(entityName).watch(pipeline, BsonDocument.class); + iterable = entities.getMongoClusterWithTimeoutMS(entityName, timeoutMS).watch(pipeline, BsonDocument.class); } else { throw new UnsupportedOperationException("No entity found for id: " + entityName); } - for (Map.Entry cur : arguments.entrySet()) { - switch (cur.getKey()) { - case "batchSize": - iterable.batchSize(cur.getValue().asNumber().intValue()); - break; - case "pipeline": - break; - case "comment": - iterable.comment(cur.getValue()); - break; - case "fullDocument": - iterable.fullDocument(FullDocument.fromString(cur.getValue().asString().getValue())); - break; - case "fullDocumentBeforeChange": - iterable.fullDocumentBeforeChange(FullDocumentBeforeChange.fromString(cur.getValue().asString().getValue())); - break; - case "showExpandedEvents": - iterable.showExpandedEvents(cur.getValue().asBoolean().getValue()); - break; - default: - throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); - } - } - return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "pipeline": + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "fullDocument": + iterable.fullDocument(FullDocument.fromString(cur.getValue().asString().getValue())); + break; + case "fullDocumentBeforeChange": + iterable.fullDocumentBeforeChange(FullDocumentBeforeChange.fromString(cur.getValue().asString().getValue())); + break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "showExpandedEvents": + iterable.showExpandedEvents(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + MongoCursor changeStreamWrappingCursor = createChangeStreamWrappingCursor(iterable); entities.addCursor(operation.getString("saveResultAsEntity", - new BsonString(createRandomEntityId())).getValue(), createChangeStreamWrappingCursor(iterable)); + new BsonString(createRandomEntityId())).getValue(), changeStreamWrappingCursor); return null; }); } @@ -1662,12 +1760,24 @@ public OperationResult executeIterateUntilDocumentOrError(final BsonDocument ope MongoCursor cursor = entities.getCursor(id); if (operation.containsKey("arguments")) { - throw new UnsupportedOperationException("Unexpected arguments"); + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); } return resultOf(cursor::next); } + + public OperationResult executeIterateOnce(final BsonDocument operation) { + String id = operation.getString("object").getValue(); + MongoCursor cursor = entities.getCursor(id); + + if (operation.containsKey("arguments")) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + return 
resultOf(cursor::tryNext); + } + public OperationResult close(final BsonDocument operation) { String id = operation.getString("object").getValue(); @@ -1682,7 +1792,7 @@ public OperationResult close(final BsonDocument operation) { } public OperationResult executeRunCommand(final BsonDocument operation) { - MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + MongoDatabase database = getMongoDatabase(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); ClientSession session = getSession(arguments); BsonDocument command = arguments.getDocument("command"); @@ -1718,7 +1828,7 @@ public OperationResult executeRunCommand(final BsonDocument operation) { } public OperationResult executeCountDocuments(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument filter = arguments.getDocument("filter"); ClientSession session = getSession(arguments); @@ -1756,7 +1866,7 @@ public OperationResult executeCountDocuments(final BsonDocument operation) { } public OperationResult executeEstimatedDocumentCount(final BsonDocument operation) { - MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); EstimatedDocumentCountOptions options = new EstimatedDocumentCountOptions(); @@ -1851,4 +1961,85 @@ private BsonDocument encodeChangeStreamDocumentToBsonDocument(final ChangeStream }; } } + + private MongoCollection getMongoCollection(final BsonDocument operation) { + MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments", new BsonDocument())); + if (timeoutMS != null) { + collection = collection.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + } + return collection; + } + private MongoDatabase getMongoDatabase(final BsonDocument operation) { + MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + if (operation.containsKey("arguments")) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + Long timeoutMS = getAndRemoveTimeoutMS(arguments); + if (timeoutMS != null) { + database = database.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + arguments.remove("timeoutMS"); + } + } + return database; + } + + private MongoCluster getMongoCluster(final BsonDocument operation) { + MongoCluster mongoCluster = entities.getClient(operation.getString("object").getValue()); + if (operation.containsKey("arguments")) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + Long timeoutMS = getAndRemoveTimeoutMS(arguments); + if (timeoutMS != null) { + mongoCluster = mongoCluster.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + arguments.remove("timeoutMS"); + } + } + return mongoCluster; + } + + private static void setCursorType(final FindIterable iterable, final Map.Entry cur) { + switch (cur.getValue().asString().getValue()) { + case "tailable": + iterable.cursorType(CursorType.Tailable); + break; + case "nonTailable": + iterable.cursorType(CursorType.NonTailable); + break; + case "tailableAwait": + iterable.cursorType(CursorType.TailableAwait); 
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported cursorType: " + cur.getValue());
+        }
+    }
+
+    private static void setTimeoutMode(final MongoIterable<?> iterable, final Map.Entry<String, BsonValue> cur) {
+        switch (cur.getValue().asString().getValue()) {
+            case "cursorLifetime":
+                invokeTimeoutMode(iterable, TimeoutMode.CURSOR_LIFETIME);
+                break;
+            case "iteration":
+                invokeTimeoutMode(iterable, TimeoutMode.ITERATION);
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported timeoutMode: " + cur.getValue());
+        }
+    }
+
+    private static void invokeTimeoutMode(final MongoIterable<?> iterable, final TimeoutMode timeoutMode) {
+        try {
+            Method timeoutModeMethod = iterable.getClass().getDeclaredMethod("timeoutMode", TimeoutMode.class);
+            timeoutModeMethod.setAccessible(true);
+            timeoutModeMethod.invoke(iterable, timeoutMode);
+        } catch (NoSuchMethodException e) {
+            throw new UnsupportedOperationException("Unsupported timeoutMode method for class: " + iterable.getClass(), e);
+        } catch (IllegalAccessException e) {
+            throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), e);
+        } catch (InvocationTargetException e) {
+            Throwable targetException = e.getTargetException();
+            if (targetException instanceof IllegalArgumentException) {
+                throw (IllegalArgumentException) targetException;
+            }
+            throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), targetException);
+        }
+    }
 }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java
index 59ae4e2f0e5..13e95a58463 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java
@@ -17,7 +17,9 @@
 package com.mongodb.client.unified;
 
 import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.gridfs.GridFSFindIterable;
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
+import com.mongodb.client.gridfs.model.GridFSFile;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
 import com.mongodb.internal.HexUtils;
 import org.bson.BsonDocument;
@@ -32,25 +34,61 @@
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.util.ArrayList;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import static java.util.Objects.requireNonNull;
 
-final class UnifiedGridFSHelper {
+final class UnifiedGridFSHelper extends UnifiedHelper {
     private final Entities entities;
 
     UnifiedGridFSHelper(final Entities entities) {
         this.entities = entities;
     }
 
+    public OperationResult executeFind(final BsonDocument operation) {
+        GridFSFindIterable iterable = createGridFSFindIterable(operation);
+        try {
+            ArrayList<GridFSFile> target = new ArrayList<>();
+            iterable.into(target);
+
+            if (target.isEmpty()) {
+                return OperationResult.NONE;
+            }
+
+            throw new UnsupportedOperationException("expectResult is not implemented for Unified GridFS tests. 
" + + "Unexpected result: " + target); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + public OperationResult executeRename(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + BsonDocument arguments = operation.getDocument("arguments"); + BsonValue id = arguments.get("id"); + String fileName = arguments.get("newFilename").asString().getValue(); + + requireNonNull(id); + requireNonNull(fileName); + + try { + bucket.rename(id, fileName); + return OperationResult.NONE; + } catch (Exception e) { + return OperationResult.of(e); + } + } + OperationResult executeDelete(final BsonDocument operation) { - GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue()); + GridFSBucket bucket = getGridFsBucket(operation); BsonDocument arguments = operation.getDocument("arguments"); BsonValue id = arguments.get("id"); if (arguments.size() > 1) { - throw new UnsupportedOperationException("Unexpected arguments"); + throw new UnsupportedOperationException("Unexpected arguments " + arguments); } requireNonNull(id); @@ -63,14 +101,29 @@ OperationResult executeDelete(final BsonDocument operation) { } } + public OperationResult executeDrop(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + if (arguments.size() > 0) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + try { + bucket.drop(); + return OperationResult.NONE; + } catch (Exception e) { + return OperationResult.of(e); + } + } + public OperationResult executeDownload(final BsonDocument operation) { - GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue()); + GridFSBucket bucket = getGridFsBucket(operation); BsonDocument arguments = operation.getDocument("arguments"); BsonValue id = arguments.get("id"); if (arguments.size() > 1) { - throw new UnsupportedOperationException("Unexpected arguments"); + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); } requireNonNull(id); @@ -119,7 +172,7 @@ private GridFSDownloadOptions getDownloadOptions(final BsonDocument arguments) { } public OperationResult executeUpload(final BsonDocument operation) { - GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue()); + GridFSBucket bucket = getGridFsBucket(operation); BsonDocument arguments = operation.getDocument("arguments"); String filename = null; @@ -165,4 +218,46 @@ public OperationResult executeUpload(final BsonDocument operation) { Document asDocument(final BsonDocument bsonDocument) { return new DocumentCodec().decode(new BsonDocumentReader(bsonDocument), DecoderContext.builder().build()); } + + private GridFSBucket getGridFsBucket(final BsonDocument operation) { + GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue()); + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments", new BsonDocument())); + if (timeoutMS != null) { + bucket = bucket.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + } + return bucket; + } + + private GridFSFindIterable createGridFSFindIterable(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + + BsonDocument arguments = operation.getDocument("arguments"); + BsonDocument filter = arguments.getDocument("filter"); + GridFSFindIterable iterable = bucket.find(filter); + for (Map.Entry cur : arguments.entrySet()) { + switch 
(cur.getKey()) { + case "session": + case "filter": + break; + case "sort": + iterable.sort(cur.getValue().asDocument()); + break; + case "batchSize": + iterable.batchSize(cur.getValue().asInt32().intValue()); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; + case "skip": + iterable.skip(cur.getValue().asInt32().intValue()); + break; + case "limit": + iterable.limit(cur.getValue().asInt32().intValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return iterable; + } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java new file mode 100644 index 00000000000..027ccf92fb5 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.bson.BsonDocument; + +abstract class UnifiedHelper { + + static Long getAndRemoveTimeoutMS(final BsonDocument arguments) { + Long timeoutMS = null; + if (arguments.containsKey("timeoutMS")) { + timeoutMS = arguments.getNumber("timeoutMS").longValue(); + arguments.remove("timeoutMS"); + } + return timeoutMS; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java index e88abd6669f..ae7ad39a2f5 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java @@ -146,10 +146,6 @@ LogMatcher getLogMatcher() { protected UnifiedTest() { } - protected void ignoreExtraEvents() { - ignoreExtraEvents = true; - } - public Entities getEntities() { return entities; } @@ -380,6 +376,7 @@ private void assertOperation(final UnifiedTestContext context, final BsonDocumen private static void assertOperationResult(final UnifiedTestContext context, final BsonDocument operation, final int operationIndex, final OperationResult result) { context.getAssertionContext().push(ContextElement.ofCompletedOperation(operation, result, operationIndex)); + if (!operation.getBoolean("ignoreResultAndError", BsonBoolean.FALSE).getValue()) { if (operation.containsKey("expectResult")) { assertNull(result.getException(), @@ -400,6 +397,7 @@ private static void assertOperationResult(final UnifiedTestContext context, fina private OperationResult executeOperation(final UnifiedTestContext context, final BsonDocument operation, final int operationNum) { context.getAssertionContext().push(ContextElement.ofStartedOperation(operation, operationNum)); String name = operation.getString("name").getValue(); + String object = operation.getString("object").getValue(); try { switch (name) { case "createEntities": @@ -469,6 +467,9 @@ 
private OperationResult executeOperation(final UnifiedTestContext context, final case "aggregate": return crudHelper.executeAggregate(operation); case "find": + if ("bucket".equals(object)){ + return gridFSHelper.executeFind(operation); + } return crudHelper.executeFind(operation); case "findOne": return crudHelper.executeFindOne(operation); @@ -505,6 +506,9 @@ private OperationResult executeOperation(final UnifiedTestContext context, final case "modifyCollection": return crudHelper.executeModifyCollection(operation); case "rename": + if ("bucket".equals(object)){ + return gridFSHelper.executeRename(operation); + } return crudHelper.executeRenameCollection(operation); case "createSearchIndex": return crudHelper.executeCreateSearchIndex(operation); @@ -520,6 +524,8 @@ private OperationResult executeOperation(final UnifiedTestContext context, final return crudHelper.executeCreateIndex(operation); case "dropIndex": return crudHelper.executeDropIndex(operation); + case "dropIndexes": + return crudHelper.executeDropIndexes(operation); case "startTransaction": return crudHelper.executeStartTransaction(operation); case "commitTransaction": @@ -536,8 +542,12 @@ private OperationResult executeOperation(final UnifiedTestContext context, final return crudHelper.close(operation); case "iterateUntilDocumentOrError": return crudHelper.executeIterateUntilDocumentOrError(operation); + case "iterateOnce": + return crudHelper.executeIterateOnce(operation); case "delete": return gridFSHelper.executeDelete(operation); + case "drop": + return gridFSHelper.executeDrop(operation); case "download": return gridFSHelper.executeDownload(operation); case "downloadByName": @@ -910,7 +920,7 @@ private OperationResult executeAssertLsidOnLastTwoCommands(final BsonDocument op operation.getDocument("arguments").getString("client").getValue()); List events = lastTwoCommandEvents(listener); String eventsJson = listener.getCommandStartedEvents().stream() - .map(e -> ((CommandStartedEvent) e).getCommand().toJson()) + .map(e -> e.getCommand().toJson()) .collect(Collectors.joining(", ")); BsonDocument expected = ((CommandStartedEvent) events.get(0)).getCommand().getDocument("lsid"); BsonDocument actual = ((CommandStartedEvent) events.get(1)).getCommand().getDocument("lsid"); @@ -976,9 +986,9 @@ private boolean indexExists(final BsonDocument operation) { } private List lastTwoCommandEvents(final TestCommandListener listener) { - List events = listener.getCommandStartedEvents(); + List events = listener.getCommandStartedEvents(); assertTrue(events.size() >= 2); - return events.subList(events.size() - 2, events.size()); + return new ArrayList<>(events.subList(events.size() - 2, events.size())); } private BsonDocument addInitialDataAndGetClusterTime() { @@ -988,7 +998,7 @@ private BsonDocument addInitialDataAndGetClusterTime() { new MongoNamespace(curDataSet.getString("databaseName").getValue(), curDataSet.getString("collectionName").getValue())); - helper.create(WriteConcern.MAJORITY); + helper.create(WriteConcern.MAJORITY, curDataSet.getDocument("createOptions", new BsonDocument())); BsonArray documentsArray = curDataSet.getArray("documents", new BsonArray()); if (!documentsArray.isEmpty()) { @@ -998,4 +1008,12 @@ private BsonDocument addInitialDataAndGetClusterTime() { } return getCurrentClusterTime(); } + + protected void ignoreExtraCommandEvents(final boolean ignoreExtraEvents) { + this.ignoreExtraEvents = ignoreExtraEvents; + } + + protected void ignoreExtraEvents() { + this.ignoreExtraEvents = true; + } } diff --git 
a/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java index fb8b0520d26..899769d2d9f 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java @@ -122,6 +122,10 @@ private void assertValuesMatch(final BsonValue initialExpected, @Nullable final actualValue = BsonDocument.parse(actualValue.asString().getValue()); value = value.asDocument().getDocument("$$matchAsDocument"); break; + case "$$lte": + value = value.asDocument().getNumber("$$lte"); + assertTrue(actualValue.asNumber().longValue() <= value.asNumber().longValue()); + return; default: throw new UnsupportedOperationException("Unsupported special operator: " + value.asDocument().getFirstKey()); } diff --git a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy index 80eced15c60..a947effd36f 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy @@ -32,6 +32,7 @@ import com.mongodb.connection.ClusterType import com.mongodb.connection.ServerConnectionState import com.mongodb.connection.ServerDescription import com.mongodb.connection.ServerType +import com.mongodb.internal.TimeoutSettings import com.mongodb.internal.client.model.changestream.ChangeStreamLevel import com.mongodb.internal.connection.Cluster import org.bson.BsonDocument @@ -46,6 +47,7 @@ import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.secondary import static com.mongodb.client.internal.TestHelper.execute +import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.UuidRepresentation.C_SHARP_LEGACY import static org.bson.UuidRepresentation.UNSPECIFIED import static org.bson.codecs.configuration.CodecRegistries.fromProviders @@ -54,7 +56,8 @@ import static spock.util.matcher.HamcrestSupport.expect class MongoClientSpecification extends Specification { - private static CodecRegistry codecRegistry = fromProviders(new ValueCodecProvider()) + private static final CodecRegistry CODEC_REGISTRY = fromProviders(new ValueCodecProvider()) + private static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(120)) def 'should pass the correct settings to getDatabase'() { given: @@ -63,7 +66,7 @@ class MongoClientSpecification extends Specification { .writeConcern(WriteConcern.MAJORITY) .readConcern(ReadConcern.MAJORITY) .retryWrites(true) - .codecRegistry(codecRegistry) + .codecRegistry(CODEC_REGISTRY) .build() def client = new MongoClientImpl(Stub(Cluster), null, settings, new TestOperationExecutor([])) @@ -74,8 +77,9 @@ class MongoClientSpecification extends Specification { expect database, isTheSameAs(expectedDatabase) where: - expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(codecRegistry, UNSPECIFIED), secondary(), - WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, new TestOperationExecutor([])) + expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(CODEC_REGISTRY, UNSPECIFIED), secondary(), + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, + TIMEOUT_SETTINGS, new 
diff --git a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
index 80eced15c60..a947effd36f 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
@@ -32,6 +32,7 @@ import com.mongodb.connection.ClusterType
 import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.TimeoutSettings
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel
 import com.mongodb.internal.connection.Cluster
 import org.bson.BsonDocument
@@ -46,6 +47,7 @@ import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.ReadPreference.secondary
 import static com.mongodb.client.internal.TestHelper.execute
+import static java.util.concurrent.TimeUnit.SECONDS
 import static org.bson.UuidRepresentation.C_SHARP_LEGACY
 import static org.bson.UuidRepresentation.UNSPECIFIED
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
@@ -54,7 +56,8 @@ import static spock.util.matcher.HamcrestSupport.expect
 
 class MongoClientSpecification extends Specification {
 
-    private static CodecRegistry codecRegistry = fromProviders(new ValueCodecProvider())
+    private static final CodecRegistry CODEC_REGISTRY = fromProviders(new ValueCodecProvider())
+    private static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(120))
 
     def 'should pass the correct settings to getDatabase'() {
         given:
@@ -63,7 +66,7 @@ class MongoClientSpecification extends Specification {
                 .writeConcern(WriteConcern.MAJORITY)
                 .readConcern(ReadConcern.MAJORITY)
                 .retryWrites(true)
-                .codecRegistry(codecRegistry)
+                .codecRegistry(CODEC_REGISTRY)
                 .build()
         def client = new MongoClientImpl(Stub(Cluster), null, settings, new TestOperationExecutor([]))
 
@@ -74,8 +77,9 @@ class MongoClientSpecification extends Specification {
         expect database, isTheSameAs(expectedDatabase)
 
         where:
-        expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(codecRegistry, UNSPECIFIED), secondary(),
-                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, new TestOperationExecutor([]))
+        expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(CODEC_REGISTRY, UNSPECIFIED), secondary(),
+                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null,
+                TIMEOUT_SETTINGS, new TestOperationExecutor([]))
     }
 
     def 'should use ListDatabasesIterableImpl correctly'() {
         given:
@@ -90,14 +94,14 @@ class MongoClientSpecification extends Specification {
 
         then:
         expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, Document,
-                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true))
+                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS))
 
         when:
         listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument)
 
         then:
         expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument,
-                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true))
+                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS))
 
         when:
         def listDatabaseNamesIterable = execute(listDatabasesNamesMethod, session) as MongoIterable
@@ -105,7 +109,8 @@ class MongoClientSpecification extends Specification {
         then:
         // listDatabaseNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it
         expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument,
-                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true).nameOnly(true))
+                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS)
+                .nameOnly(true))
 
         cleanup:
         client?.close()
@@ -134,7 +139,7 @@ class MongoClientSpecification extends Specification {
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace,
                 withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED),
-                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true),
+                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS),
                 ['codec'])
 
         when:
@@ -144,7 +149,7 @@ class MongoClientSpecification extends Specification {
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace,
                 withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED),
                 readPreference, readConcern, executor, [new Document('$match', 1)], Document, ChangeStreamLevel.CLIENT,
-                true), ['codec'])
+                true, TIMEOUT_SETTINGS), ['codec'])
 
         when:
         changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument)
@@ -153,7 +158,7 @@ class MongoClientSpecification extends Specification {
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace,
                 withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED),
                 readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument,
-                ChangeStreamLevel.CLIENT, true), ['codec'])
+                ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), ['codec'])
 
         where:
         session << [null, Stub(ClientSession)]
diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy
index 7ae3e568bf4..cb34236c627 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy
@@ -33,6 +33,7 @@ import com.mongodb.client.internal.OperationExecutor
 import com.mongodb.client.internal.TestOperationExecutor
 import com.mongodb.client.result.DeleteResult
 import
com.mongodb.client.result.UpdateResult +import com.mongodb.internal.TimeoutSettings import com.mongodb.internal.operation.BatchCursor import com.mongodb.internal.operation.FindOperation import org.bson.BsonBinary @@ -46,6 +47,9 @@ import org.bson.types.ObjectId import spock.lang.Specification import spock.lang.Unroll +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.secondary @@ -61,7 +65,7 @@ class GridFSBucketSpecification extends Specification { def database = databaseWithExecutor(Stub(OperationExecutor)) def databaseWithExecutor(OperationExecutor executor) { new MongoDatabaseImpl('test', registry, primary(), WriteConcern.ACKNOWLEDGED, false, false, readConcern, - JAVA_LEGACY, null, executor) + JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) } def 'should return the correct bucket name'() { @@ -156,7 +160,9 @@ class GridFSBucketSpecification extends Specification { given: def defaultChunkSizeBytes = 255 * 1024 def database = new MongoDatabaseImpl('test', fromProviders(new DocumentCodecProvider()), secondary(), WriteConcern.ACKNOWLEDGED, - false, false, readConcern, JAVA_LEGACY, null, new TestOperationExecutor([])) + false, false, readConcern, JAVA_LEGACY, null, + new TimeoutSettings(0, 0, 0, null, 0), + new TestOperationExecutor([])) when: def gridFSBucket = new GridFSBucketImpl(database) @@ -172,6 +178,9 @@ class GridFSBucketSpecification extends Specification { given: def filesCollection = Stub(MongoCollection) def chunksCollection = Stub(MongoCollection) + filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) when: @@ -184,7 +193,7 @@ class GridFSBucketSpecification extends Specification { then: expect stream, isTheSameAs(new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, stream.getId(), 'filename', - 255, null), ['closeLock']) + 255, null, null), ['closeLock']) where: clientSession << [null, Stub(ClientSession)] @@ -291,7 +300,9 @@ class GridFSBucketSpecification extends Specification { def fileInfo = new GridFSFile(fileId, 'File 1', 10, 255, new Date(), new Document()) def findIterable = Mock(FindIterable) def filesCollection = Mock(MongoCollection) + filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null def chunksCollection = Stub(MongoCollection) + chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) when: @@ -312,7 +323,8 @@ class GridFSBucketSpecification extends Specification { 1 * findIterable.first() >> fileInfo then: - expect stream, isTheSameAs(new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection), ['closeLock', 'cursorLock']) + expect stream, isTheSameAs(new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, + null), ['closeLock', 'cursorLock']) where: @@ -522,7 +534,9 @@ class GridFSBucketSpecification extends Specification { def fileInfo = new GridFSFile(bsonFileId, filename, 10, 255, new Date(), new Document()) def findIterable = Mock(FindIterable) def filesCollection = Mock(MongoCollection) + filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null def chunksCollection = Stub(MongoCollection) + chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null def gridFSBucket = new 
GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) when: @@ -540,7 +554,7 @@ class GridFSBucketSpecification extends Specification { 1 * findIterable.first() >> fileInfo then: - expect stream, isTheSameAs(new GridFSDownloadStreamImpl(null, fileInfo, chunksCollection), ['closeLock', 'cursorLock']) + expect stream, isTheSameAs(new GridFSDownloadStreamImpl(null, fileInfo, chunksCollection, null), ['closeLock', 'cursorLock']) where: version | skip | sortOrder @@ -600,8 +614,8 @@ class GridFSBucketSpecification extends Specification { then: executor.getReadPreference() == secondary() - expect executor.getReadOperation(), isTheSameAs(new FindOperation(new MongoNamespace('test.fs.files'), decoder) - .filter(filter)) + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(new MongoNamespace('test.fs.files'), decoder).filter(filter)) } def 'should throw an exception if file not found when opening by name'() { diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy index d8b109b1f4b..0064cc9aad8 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy @@ -16,6 +16,7 @@ package com.mongodb.client.gridfs +import com.mongodb.ClusterFixture import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern @@ -35,7 +36,7 @@ class GridFSBucketsSpecification extends Specification { def 'should create a GridFSBucket with default bucket name'() { given: def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern, - JAVA_LEGACY, null, Stub(OperationExecutor)) + JAVA_LEGACY, null, ClusterFixture.TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: def gridFSBucket = GridFSBuckets.create(database) @@ -48,7 +49,7 @@ class GridFSBucketsSpecification extends Specification { def 'should create a GridFSBucket with custom bucket name'() { given: def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern, - JAVA_LEGACY, null, Stub(OperationExecutor)) + JAVA_LEGACY, null, ClusterFixture.TIMEOUT_SETTINGS, Stub(OperationExecutor)) def customName = 'custom' when: diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy index d39ee094230..59bf12ec3a4 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy @@ -35,7 +35,7 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should return the file info'() { when: - def downloadStream = new GridFSDownloadStreamImpl(null, fileInfo, Stub(MongoCollection)) + def downloadStream = new GridFSDownloadStreamImpl(null, fileInfo, Stub(MongoCollection), null) then: downloadStream.getGridFSFile() == fileInfo @@ -59,7 +59,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new 
GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) then: downloadStream.available() == 0 @@ -132,7 +132,8 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection).batchSize(1) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, + null).batchSize(1) then: downloadStream.available() == 0 @@ -215,7 +216,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: def skipResult = downloadStream.skip(15) @@ -293,7 +294,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: def readByte = new byte[10] @@ -362,7 +363,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: downloadStream.mark() @@ -439,7 +440,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: def readByte = new byte[25] @@ -496,7 +497,7 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should not throw an exception when trying to mark post close'() { given: - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection)) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) downloadStream.close() when: @@ -517,7 +518,7 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should handle negative skip value correctly '() { given: - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection)) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) when: def result = downloadStream.skip(-1) @@ -532,7 +533,7 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should handle skip that is larger or equal to the file length'() { given: def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: def result = 
downloadStream.skip(skipValue) @@ -553,7 +554,7 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should throw if trying to pass negative batchSize'() { given: - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection)) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) when: downloadStream.batchSize(0) @@ -577,7 +578,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: downloadStream.read() @@ -609,7 +610,7 @@ class GridFSDownloadStreamSpecification extends Specification { def mongoCursor = Mock(MongoCursor) def findIterable = Mock(FindIterable) def chunksCollection = Mock(MongoCollection) - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) when: downloadStream.read() @@ -635,7 +636,7 @@ class GridFSDownloadStreamSpecification extends Specification { def 'should throw an exception when trying to action post close'() { given: - def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection)) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) downloadStream.close() when: diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy index e0686420665..632e59a16d0 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy @@ -38,6 +38,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -56,7 +57,7 @@ class GridFSFindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor, - new Document()) + new Document(), true, TIMEOUT_SETTINGS) def findIterable = new GridFSFindIterableImpl(underlying) when: 'default input should be as expected' @@ -73,7 +74,7 @@ class GridFSFindIterableSpecification extends Specification { when: 'overriding initial options' findIterable.filter(new Document('filter', 2)) .sort(new Document('sort', 2)) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -87,7 +88,6 @@ class GridFSFindIterableSpecification extends Specification { expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) .filter(new BsonDocument('filter', new BsonInt32(2))) .sort(new BsonDocument('sort', new BsonInt32(2))) - .maxTime(999, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -101,7 +101,7 @@ class GridFSFindIterableSpecification extends Specification { given: def 
executor = new TestOperationExecutor([null, null]) def findIterable = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1)) + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) when: findIterable.filter(new Document('filter', 1)) @@ -148,7 +148,7 @@ class GridFSFindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor, - new Document()) + new Document(), true, TIMEOUT_SETTINGS) def mongoIterable = new GridFSFindIterableImpl(underlying) when: diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy index e3df2c225e1..c81f947abf0 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy @@ -35,7 +35,7 @@ class GridFSUploadStreamSpecification extends Specification { def 'should return the file id'() { when: def uploadStream = new GridFSUploadStreamImpl(null, Stub(MongoCollection), Stub(MongoCollection), fileId, filename, 255 - , metadata) + , metadata, null) then: uploadStream.getId() == fileId } @@ -45,7 +45,7 @@ class GridFSUploadStreamSpecification extends Specification { def filesCollection = Mock(MongoCollection) def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 2 - , metadata) + , metadata, null) when: uploadStream.write(1) @@ -71,7 +71,7 @@ class GridFSUploadStreamSpecification extends Specification { def filesCollection = Mock(MongoCollection) def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 - , null) + , null, null) when: uploadStream.write('file content ' as byte[]) @@ -101,7 +101,8 @@ class GridFSUploadStreamSpecification extends Specification { def chunksCollection = Mock(MongoCollection) def content = 'file content ' as byte[] def metadata = new Document('contentType', 'text/txt') - def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255, metadata) + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255, + metadata, null) def filesId = fileId when: @@ -159,7 +160,7 @@ class GridFSUploadStreamSpecification extends Specification { def filesCollection = Mock(MongoCollection) def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 - , metadata) + , metadata, null) when: uploadStream.close() @@ -179,7 +180,7 @@ class GridFSUploadStreamSpecification extends Specification { given: def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), chunksCollection, fileId, filename, 255 - , metadata) + , metadata, null) when: uploadStream.write('file content ' as byte[]) @@ -199,7 +200,7 @@ class GridFSUploadStreamSpecification extends Specification { def 'should close the stream on abort'() { 
given: def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), Stub(MongoCollection), fileId, filename, 255 - , metadata) + , metadata, null) uploadStream.write('file content ' as byte[]) uploadStream.abort() @@ -217,7 +218,7 @@ class GridFSUploadStreamSpecification extends Specification { given: def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), chunksCollection, fileId, filename, 255 - , metadata) + , metadata, null) when: uploadStream.write('file content ' as byte[]) @@ -235,7 +236,7 @@ class GridFSUploadStreamSpecification extends Specification { def filesCollection = Mock(MongoCollection) def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 - , metadata) + , metadata, null) when: uploadStream.close() uploadStream.write(1) @@ -253,7 +254,7 @@ class GridFSUploadStreamSpecification extends Specification { def filesCollection = Mock(MongoCollection) def chunksCollection = Mock(MongoCollection) def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 - , metadata) + , metadata, null) when: uploadStream.getObjectId() diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy index 64bbae0ad1f..733ee4c57df 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy @@ -41,6 +41,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -62,7 +63,7 @@ class AggregateIterableSpecification extends Specification { def pipeline = [new Document('$match', 1)] def aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, - true) + true, TIMEOUT_SETTINGS) when: 'default input should be as expected' aggregationIterable.iterator() @@ -78,8 +79,8 @@ class AggregateIterableSpecification extends Specification { when: 'overriding initial options' aggregationIterable - .maxAwaitTime(99, MILLISECONDS) - .maxTime(999, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) + .maxTime(101, MILLISECONDS) .collation(collation) .hint(new Document('a', 1)) .comment('this is a comment') @@ -93,13 +94,11 @@ class AggregateIterableSpecification extends Specification { .retryReads(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) - .comment(new BsonString('this is a comment')) - .maxAwaitTime(99, MILLISECONDS) - .maxTime(999, MILLISECONDS)) + .comment(new BsonString('this is a comment'))) when: 'both hint and hint string are set' aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) aggregationIterable .hint(new Document('a', 1)) @@ -123,9 
+122,8 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .batchSize(99) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -138,7 +136,6 @@ class AggregateIterableSpecification extends Specification { expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -152,14 +149,12 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'aggregation includes $out and is at the database level' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.DATABASE, false) + pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS) .batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -173,7 +168,6 @@ class AggregateIterableSpecification extends Specification { [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.DATABASE) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -187,13 +181,11 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 operation.isAllowDiskUse() == null when: 'toCollection should work as expected' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -220,7 +212,7 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out and hint string' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .hintString('x_1').iterator() def operation = executor.getReadOperation() as AggregateToCollectionOperation @@ -234,7 +226,7 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out and hint and hint string' executor = new TestOperationExecutor([null, null, null, null, null]) new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, 
false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .hint(new BsonDocument('x', new BsonInt32(1))) .hintString('x_1').iterator() @@ -258,9 +250,8 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $merge' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .batchSize(99) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -274,7 +265,6 @@ class AggregateIterableSpecification extends Specification { new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern, AggregationLevel.COLLECTION) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -288,14 +278,12 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'aggregation includes $merge into a different database' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipelineWithIntoDocument, AggregationLevel.COLLECTION, false) + pipelineWithIntoDocument, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -310,7 +298,6 @@ class AggregateIterableSpecification extends Specification { new BsonDocument('db', new BsonString('db2')).append('coll', new BsonString(collectionName))))], readConcern, writeConcern, AggregationLevel.COLLECTION) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -324,14 +311,12 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == new MongoNamespace('db2', collectionName) operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'aggregation includes $merge and is at the database level' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.DATABASE, false) + pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS) .batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -345,7 +330,6 @@ class AggregateIterableSpecification extends Specification { new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern, AggregationLevel.DATABASE) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -359,12 +343,10 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'toCollection should work as expected' new 
AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -393,14 +375,14 @@ class AggregateIterableSpecification extends Specification { when: new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) .iterator() def operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern, writeConcern, - AggregationLevel.COLLECTION)) + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern, + writeConcern, AggregationLevel.COLLECTION)) when: operation = executor.getReadOperation() as FindOperation @@ -436,7 +418,7 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out' def aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) aggregateIterable.toCollection() def operation = executor.getReadOperation() as AggregateToCollectionOperation @@ -455,7 +437,7 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out and is at the database level' aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS) aggregateIterable.toCollection() operation = executor.getReadOperation() as AggregateToCollectionOperation @@ -474,7 +456,7 @@ class AggregateIterableSpecification extends Specification { when: 'toCollection should work as expected' aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) aggregateIterable.toCollection() operation = executor.getReadOperation() as AggregateToCollectionOperation @@ -492,7 +474,7 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out with namespace' aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) aggregateIterable.toCollection() operation = executor.getReadOperation() as AggregateToCollectionOperation @@ -519,7 +501,7 @@ class AggregateIterableSpecification extends Specification { def executor = new TestOperationExecutor([batchCursor, batchCursor]) def pipeline = [new Document('$match', 1)] def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, 
Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) when: aggregationIterable.first() @@ -545,7 +527,7 @@ class AggregateIterableSpecification extends Specification { def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null]) def pipeline = [new Document('$match', 1), new Document('$out', 'collName')] def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) when: aggregationIterable.first() @@ -576,7 +558,7 @@ class AggregateIterableSpecification extends Specification { def executor = new TestOperationExecutor([new MongoException('failure')]) def pipeline = [new BsonDocument('$match', new BsonInt32(1))] def aggregationIterable = new AggregateIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) when: 'The operation fails with an exception' aggregationIterable.iterator() @@ -592,14 +574,14 @@ class AggregateIterableSpecification extends Specification { when: 'a codec is missing' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false).iterator() + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS).iterator() then: thrown(CodecConfigurationException) when: 'pipeline contains null' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - [null], AggregationLevel.COLLECTION, false).iterator() + [null], AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS).iterator() then: thrown(IllegalArgumentException) @@ -627,7 +609,8 @@ class AggregateIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false, + TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -672,7 +655,7 @@ class AggregateIterableSpecification extends Specification { def batchSize = 5 def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, Stub(OperationExecutor), [new Document('$match', 1)], AggregationLevel.COLLECTION, - false) + false, TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy index 7141db09c43..b66373b221f 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy +++ 
b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy @@ -20,7 +20,6 @@ import com.mongodb.Function import com.mongodb.MongoException import com.mongodb.MongoNamespace import com.mongodb.ReadConcern -import com.mongodb.WriteConcern import com.mongodb.client.ClientSession import com.mongodb.client.model.Collation import com.mongodb.client.model.changestream.ChangeStreamDocument @@ -43,6 +42,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -54,7 +54,6 @@ class ChangeStreamIterableSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) def readPreference = secondary() def readConcern = ReadConcern.MAJORITY - def writeConcern = WriteConcern.MAJORITY def collation = Collation.builder().locale('en').build() def 'should build the expected ChangeStreamOperation'() { @@ -62,7 +61,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([null, null, null, null, null]) def pipeline = [new Document('$match', 1)] def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, - executor, pipeline, Document, ChangeStreamLevel.COLLECTION, true) + executor, pipeline, Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) when: 'default input should be as expected' changeStreamIterable.iterator() @@ -72,14 +71,17 @@ class ChangeStreamIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ChangeStreamOperation(namespace, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, - [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION).retryReads(true)) + expect operation, isTheSameAs(new ChangeStreamOperation(namespace, + FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [BsonDocument.parse('{$match: 1}')], codec, + ChangeStreamLevel.COLLECTION) + .retryReads(true)) readPreference == secondary() when: 'overriding initial options' def resumeToken = RawBsonDocument.parse('{_id: {a: 1}}') def startAtOperationTime = new BsonTimestamp(99) - changeStreamIterable.collation(collation).maxAwaitTime(99, MILLISECONDS) + changeStreamIterable.collation(collation) + .maxAwaitTime(101, MILLISECONDS) .fullDocument(FullDocument.UPDATE_LOOKUP) .fullDocumentBeforeChange(FullDocumentBeforeChange.WHEN_AVAILABLE) .resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime) @@ -88,12 +90,14 @@ class ChangeStreamIterableSpecification extends Specification { operation = executor.getReadOperation() as ChangeStreamOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ChangeStreamOperation(namespace, FullDocument.UPDATE_LOOKUP, - FullDocumentBeforeChange.WHEN_AVAILABLE, - [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION) + expect operation, isTheSameAs(new ChangeStreamOperation(namespace, + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.WHEN_AVAILABLE, [BsonDocument.parse('{$match: 1}')], codec, + ChangeStreamLevel.COLLECTION) .retryReads(true) - .collation(collation).maxAwaitTime(99, MILLISECONDS) - 
.resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime).startAfter(resumeToken)) + .collation(collation) + .resumeAfter(resumeToken) + .startAtOperationTime(startAtOperationTime) + .startAfter(resumeToken)) } def 'should use ClientSession'() { @@ -103,7 +107,7 @@ class ChangeStreamIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def changeStreamIterable = new ChangeStreamIterableImpl(clientSession, namespace, codecRegistry, readPreference, readConcern, - executor, [], Document, ChangeStreamLevel.COLLECTION, true) + executor, [], Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) when: changeStreamIterable.first() @@ -127,7 +131,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([new MongoException('failure')]) def pipeline = [new BsonDocument('$match', new BsonInt32(1))] def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, - executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true) + executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) when: 'The operation fails with an exception' changeStreamIterable.iterator() @@ -137,14 +141,14 @@ class ChangeStreamIterableSpecification extends Specification { when: 'a codec is missing' new ChangeStreamIterableImpl(null, namespace, altRegistry, readPreference, readConcern, executor, pipeline, Document, - ChangeStreamLevel.COLLECTION, true).iterator() + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).iterator() then: thrown(CodecConfigurationException) when: 'pipeline contains null' new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [null], Document, - ChangeStreamLevel.COLLECTION, true).iterator() + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).iterator() then: thrown(IllegalArgumentException) @@ -159,7 +163,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([cursor(cannedResults), cursor(cannedResults), cursor(cannedResults), cursor(cannedResults)]) def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [], - Document, ChangeStreamLevel.COLLECTION, true) + Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -207,7 +211,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([cursor(cannedResults), cursor(cannedResults), cursor(cannedResults), cursor(cannedResults)]) def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [], - Document, ChangeStreamLevel.COLLECTION, true).withDocumentClass(RawBsonDocument) + Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).withDocumentClass(RawBsonDocument) when: def results = mongoIterable.first() @@ -251,7 +255,8 @@ class ChangeStreamIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, - Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true) + Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true, + TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() 
== null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy index 329e8e9a8b8..49332bc8ed3 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy @@ -19,7 +19,6 @@ package com.mongodb.client.internal import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.client.ClientSession -import com.mongodb.internal.IgnorableRequestContext import com.mongodb.internal.binding.ClusterBinding import com.mongodb.internal.binding.ConnectionSource import com.mongodb.internal.binding.ReadWriteBinding @@ -27,15 +26,19 @@ import com.mongodb.internal.connection.Cluster import com.mongodb.internal.session.ClientSessionContext import spock.lang.Specification +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + class ClientSessionBindingSpecification extends Specification { def 'should return the session context from the binding'() { given: def session = Stub(ClientSession) - def wrappedBinding = Stub(ClusterBinding) + def wrappedBinding = Stub(ClusterBinding) { + getOperationContext() >> OPERATION_CONTEXT + } def binding = new ClientSessionBinding(session, false, wrappedBinding) when: - def context = binding.getSessionContext() + def context = binding.getOperationContext().getSessionContext() then: (context as ClientSessionContext).getClientSession() == session @@ -44,12 +47,14 @@ class ClientSessionBindingSpecification extends Specification { def 'should return the session context from the connection source'() { given: def session = Stub(ClientSession) - def wrappedBinding = Mock(ClusterBinding) + def wrappedBinding = Mock(ClusterBinding) { + getOperationContext() >> OPERATION_CONTEXT + } def binding = new ClientSessionBinding(session, false, wrappedBinding) when: def readConnectionSource = binding.getReadConnectionSource() - def context = readConnectionSource.getSessionContext() + def context = readConnectionSource.getOperationContext().getSessionContext() then: (context as ClientSessionContext).getClientSession() == session @@ -59,7 +64,7 @@ class ClientSessionBindingSpecification extends Specification { when: def writeConnectionSource = binding.getWriteConnectionSource() - context = writeConnectionSource.getSessionContext() + context = writeConnectionSource.getOperationContext().getSessionContext() then: (context as ClientSessionContext).getClientSession() == session @@ -144,7 +149,7 @@ class ClientSessionBindingSpecification extends Specification { def binding = new ClientSessionBinding(session, ownsSession, wrappedBinding) then: - binding.getSessionContext().isImplicitSession() == ownsSession + binding.getOperationContext().getSessionContext().isImplicitSession() == ownsSession where: ownsSession << [true, false] @@ -152,6 +157,6 @@ class ClientSessionBindingSpecification extends Specification { private ReadWriteBinding createStubBinding() { def cluster = Stub(Cluster) - new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE) + new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) } } diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy 
b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy index 990c39a4634..18a13195d00 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy @@ -16,20 +16,19 @@ package com.mongodb.client.internal +import com.mongodb.ClusterFixture import com.mongodb.ReadPreference import com.mongodb.ServerAddress import com.mongodb.connection.ClusterId import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ConnectionId import com.mongodb.connection.ServerId -import com.mongodb.internal.IgnorableRequestContext -import com.mongodb.internal.binding.StaticBindingContext +import com.mongodb.internal.TimeoutContext import com.mongodb.internal.bulk.InsertRequest import com.mongodb.internal.bulk.WriteRequestWithIndex import com.mongodb.internal.connection.Connection -import com.mongodb.internal.connection.NoOpSessionContext -import com.mongodb.internal.connection.OperationContext import com.mongodb.internal.connection.SplittablePayload +import com.mongodb.internal.time.Timeout import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonArray import org.bson.BsonBinary @@ -60,27 +59,32 @@ class CryptConnectionSpecification extends Specification { def crypt = Mock(Crypt) def cryptConnection = new CryptConnection(wrappedConnection, crypt) def codec = new DocumentCodec() + def timeoutContext = Mock(TimeoutContext) + def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext) + def operationTimeout = Mock(Timeout) + timeoutContext.getTimeout() >> operationTimeout + def encryptedCommand = toRaw(new BsonDocument('find', new BsonString('test')) .append('ssid', new BsonBinary(6 as byte, new byte[10]))) def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1)) .append('cursor', - new BsonDocument('firstBatch', - new BsonArray([new BsonDocument('_id', new BsonInt32(1)) - .append('ssid', new BsonBinary(6 as byte, new byte[10]))])))) + new BsonDocument('firstBatch', + new BsonArray([new BsonDocument('_id', new BsonInt32(1)) + .append('ssid', new BsonBinary(6 as byte, new byte[10]))])))) def decryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1)) .append('cursor', new BsonDocument('firstBatch', - new BsonArray([new BsonDocument('_id', new BsonInt32(1)) - .append('ssid', new BsonString('555-55-5555'))])))) - def operationContext = new OperationContext() - def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, null, IgnorableRequestContext.INSTANCE, operationContext) + new BsonArray([new BsonDocument('_id', new BsonInt32(1)) + .append('ssid', new BsonString('555-55-5555'))])))) + when: + def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('find', 'test') .append('filter', new Document('ssid', '555-55-5555')), codec), - new NoOpFieldNameValidator(), ReadPreference.primary(), codec, context) + new NoOpFieldNameValidator(), ReadPreference.primary(), codec, operationContext) then: _ * wrappedConnection.getDescription() >> { @@ -88,14 +92,14 @@ class CryptConnectionSpecification extends Specification { 1000, 1024 * 16_000, 1024 * 48_000, []) } 1 * crypt.encrypt('db', toRaw(new BsonDocument('find', new BsonString('test')) - .append('filter', new BsonDocument('ssid', new BsonString('555-55-5555'))))) >> { - encryptedCommand + .append('filter', new BsonDocument('ssid', new BsonString('555-55-5555')))), 
operationTimeout) >> { + encryptedCommand } 1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(), - _ as RawBsonDocumentCodec, context, true, null, null) >> { + _ as RawBsonDocumentCodec, operationContext, true, null, null) >> { encryptedResponse } - 1 * crypt.decrypt(encryptedResponse) >> { + 1 * crypt.decrypt(encryptedResponse, operationTimeout) >> { decryptedResponse } response == rawToDocument(decryptedResponse) @@ -121,14 +125,16 @@ class CryptConnectionSpecification extends Specification { def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1))) def decryptedResponse = encryptedResponse - def operationContext = new OperationContext() - def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, null, IgnorableRequestContext.INSTANCE, operationContext) + def timeoutContext = Mock(TimeoutContext) + def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext) + def operationTimeout = Mock(Timeout) + timeoutContext.getTimeout() >> operationTimeout when: def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('insert', 'test'), codec), new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), - context, true, payload, new NoOpFieldNameValidator(),) + operationContext, true, payload, new NoOpFieldNameValidator(),) then: _ * wrappedConnection.getDescription() >> { @@ -141,14 +147,14 @@ class CryptConnectionSpecification extends Specification { new BsonDocument('_id', new BsonInt32(1)) .append('ssid', new BsonString('555-55-5555')) .append('b', new BsonBinary(bytes)) - ])))) >> { + ]))), operationTimeout) >> { encryptedCommand } 1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(), - _ as RawBsonDocumentCodec, context, true, null, null,) >> { + _ as RawBsonDocumentCodec, operationContext, true, null, null,) >> { encryptedResponse } - 1 * crypt.decrypt(encryptedResponse) >> { + 1 * crypt.decrypt(encryptedResponse, operationTimeout) >> { decryptedResponse } response == rawToBsonDocument(decryptedResponse) @@ -176,13 +182,15 @@ class CryptConnectionSpecification extends Specification { def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1))) def decryptedResponse = encryptedResponse - def operationContext = new OperationContext() - def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, null, IgnorableRequestContext.INSTANCE, operationContext) + def timeoutContext = Mock(TimeoutContext) + def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext) + def operationTimeout = Mock(Timeout) + timeoutContext.getTimeout() >> operationTimeout when: def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('insert', 'test'), codec), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), context, true, payload, + new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload, new NoOpFieldNameValidator()) then: @@ -195,14 +203,14 @@ class CryptConnectionSpecification extends Specification { new BsonArray([ new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2)) - ])))) >> { + ]))), operationTimeout) >> { encryptedCommand } 1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(), - _ as RawBsonDocumentCodec, context, true, null, null,) >> { + _ as 
RawBsonDocumentCodec, operationContext, true, null, null,) >> { encryptedResponse } - 1 * crypt.decrypt(encryptedResponse) >> { + 1 * crypt.decrypt(encryptedResponse, operationTimeout) >> { decryptedResponse } response == rawToBsonDocument(decryptedResponse) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy index 8a7898581a2..3baac05653a 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy @@ -37,6 +37,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -55,7 +56,7 @@ class DistinctIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def distinctIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, 'field', new BsonDocument(), true) + executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) when: 'default input should be as expected' distinctIterable.iterator() @@ -69,14 +70,14 @@ class DistinctIterableSpecification extends Specification { readPreference == secondary() when: 'overriding initial options' - distinctIterable.filter(new Document('field', 1)).maxTime(999, MILLISECONDS).batchSize(99).collation(collation).iterator() + distinctIterable.filter(new Document('field', 1)).maxTime(100, MILLISECONDS).batchSize(99).collation(collation).iterator() operation = executor.getReadOperation() as DistinctOperation then: 'should use the overrides' - expect operation, isTheSameAs(new DistinctOperation(namespace, 'field', new DocumentCodec()) - .filter(new BsonDocument('field', new BsonInt32(1))) - .maxTime(999, MILLISECONDS).collation(collation).retryReads(true)) + expect operation, isTheSameAs( + new DistinctOperation(namespace, 'field', new DocumentCodec()) + .filter(new BsonDocument('field', new BsonInt32(1))).collation(collation).retryReads(true)) } def 'should use ClientSession'() { @@ -86,7 +87,7 @@ class DistinctIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def distinctIterable = new DistinctIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, executor, 'field', new BsonDocument()) + readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) when: distinctIterable.first() @@ -109,7 +110,7 @@ class DistinctIterableSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) def executor = new TestOperationExecutor([new MongoException('failure')]) def distinctIterable = new DistinctIterableImpl(null, namespace, Document, BsonDocument, codecRegistry, readPreference, - readConcern, executor, 'field', new BsonDocument()) + readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) when: 'The operation fails with an exception' distinctIterable.iterator() @@ -145,7 +146,7 @@ class DistinctIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), 
cursor()]) def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, ReadConcern.LOCAL, - executor, 'field', new BsonDocument()) + executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -189,7 +190,7 @@ class DistinctIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - Stub(OperationExecutor), 'field', new BsonDocument()) + Stub(OperationExecutor), 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy index 98848a84dfa..e2f7cae2d62 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.client.internal - import com.mongodb.CursorType import com.mongodb.Function import com.mongodb.MongoException @@ -39,10 +38,10 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders import static spock.util.matcher.HamcrestSupport.expect @@ -59,11 +58,9 @@ class FindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1), true) + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) .sort(new Document('sort', 1)) .projection(new Document('projection', 1)) - .maxTime(10, SECONDS) - .maxAwaitTime(20, SECONDS) .batchSize(100) .limit(100) .skip(10) @@ -90,8 +87,6 @@ class FindIterableSpecification extends Specification { .filter(new BsonDocument('filter', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(10000, MILLISECONDS) - .maxAwaitTime(20000, MILLISECONDS) .batchSize(100) .limit(100) .skip(10) @@ -111,8 +106,8 @@ class FindIterableSpecification extends Specification { findIterable.filter(new Document('filter', 2)) .sort(new Document('sort', 2)) .projection(new Document('projection', 2)) - .maxTime(9, SECONDS) - .maxAwaitTime(18, SECONDS) + .maxTime(101, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -132,32 +127,31 @@ class FindIterableSpecification extends Specification { operation = executor.getReadOperation() as FindOperation then: 'should use the overrides' - expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) - .filter(new BsonDocument('filter', new BsonInt32(2))) - .sort(new BsonDocument('sort', new BsonInt32(2))) - .projection(new BsonDocument('projection', new BsonInt32(2))) - .maxTime(9000, MILLISECONDS) - .maxAwaitTime(18000, MILLISECONDS) - .batchSize(99) - .limit(99) - .skip(9) - .cursorType(CursorType.Tailable) - 
.noCursorTimeout(true) - .partial(true) - .collation(collation) - .comment(new BsonString('alt comment')) - .hint(new BsonDocument('hint', new BsonInt32(2))) - .min(new BsonDocument('min', new BsonInt32(2))) - .max(new BsonDocument('max', new BsonInt32(2))) - .returnKey(true) - .showRecordId(true) - .allowDiskUse(true) - .retryReads(true) + expect operation, isTheSameAs( + new FindOperation(namespace, new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(2))) + .sort(new BsonDocument('sort', new BsonInt32(2))) + .projection(new BsonDocument('projection', new BsonInt32(2))) + .batchSize(99) + .limit(99) + .skip(9) + .cursorType(CursorType.Tailable) + .noCursorTimeout(true) + .partial(true) + .collation(collation) + .comment(new BsonString('alt comment')) + .hint(new BsonDocument('hint', new BsonInt32(2))) + .min(new BsonDocument('min', new BsonInt32(2))) + .max(new BsonDocument('max', new BsonInt32(2))) + .returnKey(true) + .showRecordId(true) + .allowDiskUse(true) + .retryReads(true) ) when: 'passing nulls to nullable methods' new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1), true) + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) .filter(null as Bson) .collation(null) .projection(null) @@ -182,7 +176,7 @@ class FindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def findIterable = new FindIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1)) + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) when: findIterable.first() @@ -204,7 +198,7 @@ class FindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1), true) + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) when: findIterable.filter(new Document('filter', 1)) @@ -244,7 +238,7 @@ class FindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document()) + executor, new Document(), true, TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -288,7 +282,7 @@ class FindIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, Stub(OperationExecutor), new Document()) + readConcern, Stub(OperationExecutor), new Document(), true, TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null @@ -310,7 +304,7 @@ class FindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor]) def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document()) + executor, new Document(), true, TIMEOUT_SETTINGS) when: mongoIterable.forEach(new Consumer() { diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy index 
3756a80094f..559935c05ee 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy @@ -30,11 +30,12 @@ import org.bson.codecs.DocumentCodecProvider import org.bson.codecs.ValueCodecProvider import spock.lang.Specification +import java.util.concurrent.TimeUnit import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary -import static java.util.concurrent.TimeUnit.MILLISECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders import static spock.util.matcher.HamcrestSupport.expect @@ -48,12 +49,11 @@ class ListCollectionsIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null, null]) def listCollectionIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, - readPreference, executor, true) + readPreference, executor, true, TIMEOUT_SETTINGS) .filter(new Document('filter', 1)) .batchSize(100) - .maxTime(1000, MILLISECONDS) def listCollectionNamesIterable = new ListCollectionsIterableImpl(null, 'db', true, Document, codecRegistry, - readPreference, executor, true) + readPreference, executor, true, TIMEOUT_SETTINGS) when: 'default input should be as expected' listCollectionIterable.iterator() @@ -63,19 +63,19 @@ class ListCollectionsIterableSpecification extends Specification { then: expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) - .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100).maxTime(1000, MILLISECONDS) + .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100) .retryReads(true) .authorizedCollections(false)) readPreference == secondary() when: 'overriding initial options' - listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(999, MILLISECONDS).iterator() + listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(100, TimeUnit.MILLISECONDS).iterator() operation = executor.getReadOperation() as ListCollectionsOperation then: 'should use the overrides' expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) - .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99).maxTime(999, MILLISECONDS) + .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99) .retryReads(true)) when: 'requesting collection names only' @@ -105,7 +105,7 @@ class ListCollectionsIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def listCollectionIterable = new ListCollectionsIterableImpl(clientSession, 'db', false, Document, codecRegistry, - readPreference, executor, true) + readPreference, executor, true, TIMEOUT_SETTINGS) when: listCollectionIterable.first() @@ -145,7 +145,7 @@ class ListCollectionsIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, readPreference, - executor, true) + executor, true, TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -189,7 +189,7 @@ class ListCollectionsIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new 
ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, readPreference, - Stub(OperationExecutor), true) + Stub(OperationExecutor), true, TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy index bfe4adb26f9..8df91709486 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy @@ -30,6 +30,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -45,8 +46,8 @@ class ListDatabasesIterableSpecification extends Specification { def 'should build the expected listCollectionOperation'() { given: def executor = new TestOperationExecutor([null, null, null]) - def listDatabaseIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor) - .maxTime(1000, MILLISECONDS) + def listDatabaseIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor, true, + TIMEOUT_SETTINGS) when: 'default input should be as expected' listDatabaseIterable.iterator() @@ -55,26 +56,26 @@ class ListDatabasesIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()).maxTime(1000, MILLISECONDS) + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) .retryReads(true)) readPreference == secondary() when: 'overriding initial options' - listDatabaseIterable.maxTime(999, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator() + listDatabaseIterable.maxTime(100, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator() operation = executor.getReadOperation() as ListDatabasesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()).maxTime(999, MILLISECONDS) + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).retryReads(true)) when: 'overriding initial options' - listDatabaseIterable.maxTime(101, MILLISECONDS).filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator() + listDatabaseIterable.filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator() operation = executor.getReadOperation() as ListDatabasesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()).maxTime(101, MILLISECONDS) + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).authorizedDatabasesOnly(true).retryReads(true)) } @@ -99,7 +100,8 @@ class ListDatabasesIterableSpecification extends Specification { } } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) - def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor) + def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, 
readPreference, executor, + true, TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -143,7 +145,7 @@ class ListDatabasesIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, - Stub(OperationExecutor)) + Stub(OperationExecutor), true, TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy index d1090fe1525..d11c59d46d2 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy @@ -31,6 +31,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -47,8 +48,8 @@ class ListIndexesIterableSpecification extends Specification { def 'should build the expected listIndexesOperation'() { given: def executor = new TestOperationExecutor([null, null]) - def listIndexesIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, executor) - .batchSize(100).maxTime(1000, MILLISECONDS) + def listIndexesIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + executor, true, TIMEOUT_SETTINGS).batchSize(100) when: 'default input should be as expected' listIndexesIterable.iterator() @@ -58,19 +59,19 @@ class ListIndexesIterableSpecification extends Specification { then: expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) - .batchSize(100).maxTime(1000, MILLISECONDS).retryReads(true)) + .batchSize(100).retryReads(true)) readPreference == secondary() when: 'overriding initial options' listIndexesIterable.batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .iterator() operation = executor.getReadOperation() as ListIndexesOperation then: 'should use the overrides' expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) - .batchSize(99).maxTime(999, MILLISECONDS).retryReads(true)) + .batchSize(99).retryReads(true)) } def 'should use ClientSession'() { @@ -80,7 +81,7 @@ class ListIndexesIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def listIndexesIterable = new ListIndexesIterableImpl(clientSession, namespace, Document, codecRegistry, readPreference, - executor) + executor, true, TIMEOUT_SETTINGS) when: listIndexesIterable.first() @@ -120,7 +121,8 @@ class ListIndexesIterableSpecification extends Specification { } } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) - def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, executor) + def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + executor, true, TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -164,7 +166,7 @@ class ListIndexesIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, 
codecRegistry, readPreference, - Stub(OperationExecutor)) + Stub(OperationExecutor), true, TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy index c24f479b784..b6cb01d31cb 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy @@ -42,6 +42,7 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -62,7 +63,7 @@ class MapReduceIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) when: 'default input should be as expected' mapReduceIterable.iterator() @@ -71,8 +72,8 @@ class MapReduceIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, new BsonJavaScript('map'), - new BsonJavaScript('reduce'), new DocumentCodec()) + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) .verbose(true)) readPreference == secondary() @@ -80,7 +81,7 @@ class MapReduceIterableSpecification extends Specification { mapReduceIterable.filter(new Document('filter', 1)) .finalizeFunction('finalize') .limit(999) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document('scope', 1)) .sort(new Document('sort', 1)) .verbose(false) @@ -90,12 +91,11 @@ class MapReduceIterableSpecification extends Specification { operation = (executor.getReadOperation() as MapReduceIterableImpl.WrappedMapReduceReadOperation).getOperation() then: 'should use the overrides' - expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, new BsonJavaScript('map'), - new BsonJavaScript('reduce'), new DocumentCodec()) + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))) .finalizeFunction(new BsonJavaScript('finalize')) .limit(999) - .maxTime(999, MILLISECONDS) .scope(new BsonDocument('scope', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .verbose(false) @@ -109,14 +109,14 @@ class MapReduceIterableSpecification extends Specification { when: 'mapReduce to a collection' def collectionNamespace = new MongoNamespace('dbName', 'collName') - def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - writeConcern, executor, 'map', 'reduce') + def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, + readPreference, readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) 
.collectionName(collectionNamespace.getCollectionName()) .databaseName(collectionNamespace.getDatabaseName()) .filter(new Document('filter', 1)) .finalizeFunction('finalize') .limit(999) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document('scope', 1)) .sort(new Document('sort', 1)) .verbose(false) @@ -128,13 +128,12 @@ class MapReduceIterableSpecification extends Specification { mapReduceIterable.iterator() def operation = executor.getWriteOperation() as MapReduceToCollectionOperation - def expectedOperation = new MapReduceToCollectionOperation(namespace, new BsonJavaScript('map'), - new BsonJavaScript('reduce'), 'collName', writeConcern) + def expectedOperation = new MapReduceToCollectionOperation(namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'collName', writeConcern) .databaseName(collectionNamespace.getDatabaseName()) .filter(new BsonDocument('filter', new BsonInt32(1))) .finalizeFunction(new BsonJavaScript('finalize')) .limit(999) - .maxTime(999, MILLISECONDS) .scope(new BsonDocument('scope', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .verbose(false) @@ -170,7 +169,7 @@ class MapReduceIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) when: mapReduceIterable.first() @@ -195,7 +194,7 @@ class MapReduceIterableSpecification extends Specification { } def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null]) def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) .collectionName('collName') when: @@ -228,7 +227,7 @@ class MapReduceIterableSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) def executor = new TestOperationExecutor([new MongoException('failure')]) def mapReduceIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, - readPreference, readConcern, writeConcern, executor, 'map', 'reduce') + readPreference, readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) when: 'The operation fails with an exception' @@ -245,7 +244,7 @@ class MapReduceIterableSpecification extends Specification { when: 'a codec is missing' new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - 'map', 'reduce').iterator() + 'map', 'reduce', TIMEOUT_SETTINGS).iterator() then: thrown(CodecConfigurationException) @@ -274,7 +273,7 @@ class MapReduceIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) when: def results = mongoIterable.first() @@ -318,7 +317,7 @@ class MapReduceIterableSpecification extends Specification { when: def batchSize = 5 def 
mongoIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce') + readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce', TIMEOUT_SETTINGS) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy new file mode 100644 index 00000000000..62c16330950 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy @@ -0,0 +1,263 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.MongoClient +import com.mongodb.client.MongoIterable +import com.mongodb.internal.TimeoutSettings +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel +import com.mongodb.internal.connection.Cluster +import com.mongodb.internal.session.ServerSessionPool +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.UuidCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecRegistry +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.secondary +import static com.mongodb.client.internal.TestHelper.execute +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class MongoClusterSpecification extends Specification { + + private static final CodecRegistry CODEC_REGISTRY = fromProviders(new ValueCodecProvider()) + private static final MongoClientSettings CLIENT_SETTINGS = MongoClientSettings.builder().build() + private static final TimeoutSettings TIMEOUT_SETTINGS = TimeoutSettings.create(CLIENT_SETTINGS) + private final Cluster cluster = Stub(Cluster) + private final MongoClient originator = Stub(MongoClient) + private final ServerSessionPool serverSessionPool = Stub(ServerSessionPool) + private final OperationExecutor operationExecutor = Stub(OperationExecutor) + + def 'should pass the correct settings to getDatabase'() { + given: + def settings = MongoClientSettings.builder() + .readPreference(secondary()) + .writeConcern(WriteConcern.MAJORITY) + .readConcern(ReadConcern.MAJORITY) + .retryWrites(true) + .codecRegistry(CODEC_REGISTRY) + .build() + def operationExecutor = new TestOperationExecutor([]) + 
def mongoClientCluster = createMongoCluster(settings, operationExecutor) + + when: + def database = mongoClientCluster.getDatabase('name') + + then: + expect database, isTheSameAs(expectedDatabase) + + where: + expectedDatabase << new MongoDatabaseImpl('name', CODEC_REGISTRY, secondary(), + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, + TIMEOUT_SETTINGS, new TestOperationExecutor([])) + } + + def 'should behave correctly when using withCodecRegistry'() { + given: + def newCodecRegistry = fromProviders(new ValueCodecProvider()) + + when: + def mongoCluster = createMongoCluster().withCodecRegistry(newCodecRegistry) + + then: + (mongoCluster.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == UNSPECIFIED + expect mongoCluster, isTheSameAs(createMongoCluster( + MongoClientSettings.builder(CLIENT_SETTINGS).codecRegistry(newCodecRegistry).build())) + } + + def 'should behave correctly when using withReadPreference'() { + given: + def newReadPreference = ReadPreference.secondaryPreferred() + + when: + def mongoCluster = createMongoCluster().withReadPreference(newReadPreference) + + then: + mongoCluster.getReadPreference() == newReadPreference + expect mongoCluster, isTheSameAs( + createMongoCluster(MongoClientSettings.builder(CLIENT_SETTINGS).readPreference(newReadPreference).build())) + } + + def 'should behave correctly when using withWriteConcern'() { + given: + def newWriteConcern = WriteConcern.MAJORITY + + when: + def mongoCluster = createMongoCluster().withWriteConcern(newWriteConcern) + + then: + mongoCluster.getWriteConcern() == newWriteConcern + expect mongoCluster, isTheSameAs(createMongoCluster( + MongoClientSettings.builder(CLIENT_SETTINGS).writeConcern(newWriteConcern).build())) + } + + def 'should behave correctly when using withReadConcern'() { + given: + def newReadConcern = ReadConcern.MAJORITY + + when: + def mongoCluster = createMongoCluster().withReadConcern(newReadConcern) + + then: + mongoCluster.getReadConcern() == newReadConcern + expect mongoCluster, isTheSameAs(createMongoCluster( + MongoClientSettings.builder(CLIENT_SETTINGS).readConcern(newReadConcern).build())) + } + + def 'should behave correctly when using withTimeout'() { + when: + def mongoCluster = createMongoCluster().withTimeout(10_000, TimeUnit.MILLISECONDS) + + then: + mongoCluster.getTimeout(TimeUnit.MILLISECONDS) == 10_000 + expect mongoCluster, isTheSameAs(createMongoCluster(MongoClientSettings.builder(CLIENT_SETTINGS) + .timeout(10_000, TimeUnit.MILLISECONDS).build())) + + when: + createMongoCluster().withTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + } + + + def 'should use ListDatabasesIterableImpl correctly'() { + given: + def executor = new TestOperationExecutor([null, null]) + def mongoCluster = createMongoCluster(executor) + def listDatabasesMethod = mongoCluster.&listDatabases + def listDatabasesNamesMethod = mongoCluster.&listDatabaseNames + + when: + def listDatabasesIterable = execute(listDatabasesMethod, session) + + then: + expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, Document, + CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS)) + + when: + listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument) + + then: + expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, + CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS)) + + when: + def listDatabaseNamesIterable = 
execute(listDatabasesNamesMethod, session) as MongoIterable + + then: + // listDatabaseNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it + expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, + CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS) + .nameOnly(true)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should create ChangeStreamIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def namespace = new MongoNamespace('admin', 'ignored') + def settings = MongoClientSettings.builder() + .readPreference(secondary()) + .readConcern(ReadConcern.MAJORITY) + .codecRegistry(getDefaultCodecRegistry()) + .build() + def readPreference = settings.getReadPreference() + def readConcern = settings.getReadConcern() + def mongoCluster = createMongoCluster(settings, executor) + def watchMethod = mongoCluster.&watch + + when: + def changeStreamIterable = execute(watchMethod, session) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry, + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), + ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)]) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], Document, ChangeStreamLevel.CLIENT, + true, TIMEOUT_SETTINGS), ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, + ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), ['codec']) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the ChangeStreamIterable pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + def mongoCluster = createMongoCluster(executor) + + when: + mongoCluster.watch((Class) null) + + then: + thrown(IllegalArgumentException) + + when: + mongoCluster.watch([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + MongoClusterImpl createMongoCluster() { + createMongoCluster(CLIENT_SETTINGS) + } + + MongoClusterImpl createMongoCluster(final MongoClientSettings settings) { + createMongoCluster(settings, operationExecutor) + } + + MongoClusterImpl createMongoCluster(final OperationExecutor operationExecutor) { + createMongoCluster(CLIENT_SETTINGS, operationExecutor) + } + + MongoClusterImpl createMongoCluster(final MongoClientSettings settings, final OperationExecutor operationExecutor) { + new MongoClusterImpl(null, cluster, settings.codecRegistry, null, null, + originator, operationExecutor, settings.readConcern, settings.readPreference, settings.retryReads, settings.retryWrites, + null, serverSessionPool, TimeoutSettings.create(settings), settings.uuidRepresentation, settings.writeConcern) + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy index 5951a5b6589..2fba3b90a0a 100644 --- 
a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy @@ -92,6 +92,7 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.secondary @@ -122,7 +123,7 @@ class MongoCollectionSpecification extends Specification { def 'should return the correct name from getName'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, - true, readConcern, JAVA_LEGACY, null, new TestOperationExecutor([null])) + true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([null])) expect: collection.getNamespace() == namespace @@ -135,12 +136,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withDocumentClass(newClass) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withDocumentClass(newClass) then: collection.getDocumentClass() == newClass expect collection, isTheSameAs(new MongoCollectionImpl(namespace, newClass, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withCodecRegistry'() { @@ -150,12 +151,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, C_SHARP_LEGACY, null, executor).withCodecRegistry(newCodecRegistry) + true, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor).withCodecRegistry(newCodecRegistry) then: (collection.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, collection.getCodecRegistry(), readPreference, - ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, executor)) + ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withReadPreference'() { @@ -165,12 +166,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withReadPreference(newReadPreference) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withReadPreference(newReadPreference) then: collection.getReadPreference() == newReadPreference expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, newReadPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withWriteConcern'() { @@ -180,12 +181,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, 
readConcern, JAVA_LEGACY, null, executor).withWriteConcern(newWriteConcern) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withWriteConcern(newWriteConcern) then: collection.getWriteConcern() == newWriteConcern expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, newWriteConcern, - true, true, readConcern, JAVA_LEGACY, null, executor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withReadConcern'() { @@ -195,12 +196,33 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withReadConcern(newReadConcern) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withReadConcern(newReadConcern) then: collection.getReadConcern() == newReadConcern expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, newReadConcern, JAVA_LEGACY, null, executor)) + true, true, newReadConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withTimeout'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + def newCollection = collection.withTimeout(10_000, MILLISECONDS) + + then: + newCollection.getTimeout(MILLISECONDS) == 10_000 + expect newCollection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS.withTimeout(10_000, MILLISECONDS), executor)) + + when: + collection.withTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) } def 'should use CountOperation correctly with documentCount'() { @@ -208,8 +230,9 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([1L, 2L, 3L, 4L]) def filter = new BsonDocument() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, - true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new CountDocumentsOperation(namespace).filter(filter).retryReads(true) + true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new CountDocumentsOperation(namespace) + .filter(filter).retryReads(true) def countMethod = collection.&countDocuments @@ -232,13 +255,12 @@ class MongoCollectionSpecification extends Specification { when: def hint = new BsonDocument('hint', new BsonInt32(1)) - execute(countMethod, session, filter, new CountOptions().hint(hint).skip(10).limit(100) - .maxTime(100, MILLISECONDS).collation(collation)) + execute(countMethod, session, filter, new CountOptions().hint(hint).skip(10).limit(100).collation(collation)) operation = executor.getReadOperation() as CountDocumentsOperation then: executor.getClientSession() == session - expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100).maxTime(100, MILLISECONDS) + expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100) .collation(collation)) where: @@ -249,7 +271,7 @@ class 
MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([1L, 2L]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, - true, readConcern, JAVA_LEGACY, null, executor) + true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = new EstimatedDocumentCountOperation(namespace) .retryReads(true) @@ -264,12 +286,13 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new EstimatedDocumentCountOperation(namespace).retryReads(true) execute(countMethod, session, new EstimatedDocumentCountOptions().maxTime(100, MILLISECONDS)) operation = executor.getReadOperation() as EstimatedDocumentCountOperation then: executor.getClientSession() == session - expect operation, isTheSameAs(expectedOperation.maxTime(100, MILLISECONDS)) + expect operation, isTheSameAs(expectedOperation) where: session << [null] @@ -279,7 +302,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def filter = new Document('a', 1) def distinctMethod = collection.&distinct @@ -288,14 +311,14 @@ class MongoCollectionSpecification extends Specification { then: expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String, - codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true)) + codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)) when: distinctIterable = execute(distinctMethod, session, 'field', String).filter(filter) then: expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String, - codecRegistry, readPreference, readConcern, executor, 'field', filter, true)) + codecRegistry, readPreference, readConcern, executor, 'field', filter, true, TIMEOUT_SETTINGS)) where: session << [null, Stub(ClientSession)] @@ -305,7 +328,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def findMethod = collection.&find when: @@ -313,28 +336,28 @@ class MongoCollectionSpecification extends Specification { then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, Document, codecRegistry, - readPreference, readConcern, executor, new BsonDocument(), true)) + readPreference, readConcern, executor, new BsonDocument(), true, TIMEOUT_SETTINGS)) when: findIterable = execute(findMethod, session, BsonDocument) then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument, - codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true)) + codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true, TIMEOUT_SETTINGS)) when: findIterable = execute(findMethod, session, new Document()) then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, 
Document, - codecRegistry, readPreference, readConcern, executor, new Document(), true)) + codecRegistry, readPreference, readConcern, executor, new Document(), true, TIMEOUT_SETTINGS)) when: findIterable = execute(findMethod, session, new Document(), BsonDocument) then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument, - codecRegistry, readPreference, readConcern, executor, new Document(), true)) + codecRegistry, readPreference, readConcern, executor, new Document(), true, TIMEOUT_SETTINGS)) where: session << [null, Stub(ClientSession)] @@ -344,7 +367,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def aggregateMethod = collection.&aggregate when: @@ -353,7 +376,7 @@ class MongoCollectionSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, Document, codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)], - AggregationLevel.COLLECTION, true)) + AggregationLevel.COLLECTION, true, TIMEOUT_SETTINGS)) when: aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument) @@ -361,7 +384,7 @@ class MongoCollectionSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, BsonDocument, codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)], - AggregationLevel.COLLECTION, true)) + AggregationLevel.COLLECTION, true, TIMEOUT_SETTINGS)) where: session << [null, Stub(ClientSession)] @@ -371,7 +394,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.aggregate(null) @@ -390,7 +413,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def watchMethod = collection.&watch when: @@ -398,7 +421,7 @@ class MongoCollectionSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, - readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true), + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec']) when: @@ -407,7 +430,7 @@ class MongoCollectionSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], Document, - ChangeStreamLevel.COLLECTION, true), ['codec']) + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec']) when: 
changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) @@ -415,7 +438,7 @@ class MongoCollectionSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, - ChangeStreamLevel.COLLECTION, true), ['codec']) + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec']) where: session << [null, Stub(ClientSession)] @@ -425,7 +448,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.watch((Class) null) @@ -444,7 +467,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def mapReduceMethod = collection.&mapReduce when: @@ -452,14 +475,14 @@ class MongoCollectionSpecification extends Specification { then: expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, Document, - codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce')) + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', TIMEOUT_SETTINGS)) when: mapReduceIterable = execute(mapReduceMethod, session, 'map', 'reduce', BsonDocument) then: expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, BsonDocument, - codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce')) + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', TIMEOUT_SETTINGS)) where: session << [null, Stub(ClientSession)] @@ -471,7 +494,7 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, BsonDocument, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassValidation, List filters -> new MixedBulkWriteOperation(namespace, [ new InsertRequest(BsonDocument.parse('{_id: 1}')), @@ -538,7 +561,7 @@ class MongoCollectionSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) def executor = new TestOperationExecutor([new MongoException('failure')]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.bulkWrite(null) @@ -565,7 +588,7 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
acknowledged(INSERT, 0, 0, [], []) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { WriteConcern wc, Boolean bypassDocumentValidation -> new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], true, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation) @@ -610,7 +633,7 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassDocumentValidation -> new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), @@ -656,7 +679,7 @@ class MongoCollectionSpecification extends Specification { def 'should validate the insertMany data correctly'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: collection.insertMany(null) @@ -678,7 +701,7 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? DeleteResult.acknowledged(1) : DeleteResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def deleteOneMethod = collection.&deleteOne when: @@ -720,7 +743,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([bulkWriteException]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.deleteOne(new Document('_id', 1)) @@ -741,7 +764,7 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? DeleteResult.acknowledged(1) : DeleteResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def deleteManyMethod = collection.&deleteMany when: @@ -785,7 +808,7 @@ class MongoCollectionSpecification extends Specification { def expectedResult = writeConcern.isAcknowledged() ? 
UpdateResult.acknowledged(1, modifiedCount, upsertedId) : UpdateResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassValidation, Collation collation -> new MixedBulkWriteOperation(namespace, @@ -827,7 +850,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([bulkWriteException]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.replaceOne(new Document('_id', 1), new Document('_id', 1)) @@ -855,7 +878,7 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(1, 0, null) : UpdateResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, List filters, BsonDocument hintDoc, String hintStr -> new MixedBulkWriteOperation(namespace, @@ -904,7 +927,7 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(5, 3, null) : UpdateResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, List filters, BsonDocument hintDoc, String hintStr -> new MixedBulkWriteOperation(namespace, @@ -948,7 +971,7 @@ class MongoCollectionSpecification extends Specification { def 'should translate MongoBulkWriteException to MongoWriteException'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.insertOne(new Document('_id', 1)) @@ -970,7 +993,7 @@ class MongoCollectionSpecification extends Specification { new WriteConcernError(42, 'codeName', 'Message', new BsonDocument()), new ServerAddress(), [] as Set)]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: collection.insertOne(new Document('_id', 1)) @@ -986,8 +1009,9 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
WriteConcernResult.acknowledged(1, true, null) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, + new DocumentCodec()) .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndDeleteMethod = collection.&findOneAndDelete @@ -999,14 +1023,20 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = + new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .collation(collation) execute(findOneAndDeleteMethod, session, new Document('a', 1), - new FindOneAndDeleteOptions().projection(new Document('projection', 1)) - .maxTime(100, MILLISECONDS).collation(collation)) + new FindOneAndDeleteOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .collation(collation)) operation = executor.getWriteOperation() as FindAndDeleteOperation then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).collation(collation)) + expect operation, isTheSameAs(expectedOperation) where: [writeConcern, session, retryWrites] << [ @@ -1022,9 +1052,10 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
WriteConcernResult.acknowledged(1, true, null) : WriteConcernResult.unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), - new BsonDocument('a', new BsonInt32(10))).filter(new BsonDocument('a', new BsonInt32(1))) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, + retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndReplaceMethod = collection.&findOneAndReplace when: @@ -1035,24 +1066,22 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, + retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .bypassDocumentValidation(false) + .collation(collation) execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10), - new FindOneAndReplaceOptions().projection(new Document('projection', 1)) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(false)) - operation = executor.getWriteOperation() as FindAndReplaceOperation - - then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(false)) - - when: - execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10), - new FindOneAndReplaceOptions().projection(new Document('projection', 1)) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(true).collation(collation)) + new FindOneAndReplaceOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .bypassDocumentValidation(false) + .collation(collation)) operation = executor.getWriteOperation() as FindAndReplaceOperation then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(true).collation(collation)) + expect operation, isTheSameAs(expectedOperation) where: [writeConcern, session, retryWrites] << [ @@ -1068,9 +1097,10 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
WriteConcernResult.acknowledged(1, true, null) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), - new BsonDocument('a', new BsonInt32(10))).filter(new BsonDocument('a', new BsonInt32(1))) + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, + new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndUpdateMethod = collection.&findOneAndUpdate when: @@ -1081,15 +1111,25 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, + new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .arrayFilters(arrayFilters) + execute(findOneAndUpdateMethod, session, new Document('a', 1), new Document('a', 10), - new FindOneAndUpdateOptions().projection(new Document('projection', 1)).maxTime(100, MILLISECONDS) - .bypassDocumentValidation(bypassDocumentValidation).collation(collation).arrayFilters(arrayFilters)) + new FindOneAndUpdateOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .arrayFilters(arrayFilters)) operation = executor.getWriteOperation() as FindAndUpdateOperation then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(bypassDocumentValidation).collation(collation) - .arrayFilters(arrayFilters)) + expect operation, isTheSameAs(expectedOperation) where: [writeConcern, arrayFilters, bypassDocumentValidation, session, retryWrites] << [ @@ -1105,7 +1145,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = new DropCollectionOperation(namespace, ACKNOWLEDGED) def dropMethod = collection.&drop @@ -1125,7 +1165,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null, null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def createIndexMethod = collection.&createIndex def createIndexesMethod = collection.&createIndexes @@ -1153,10 +1193,12 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = expectedOperation.maxTime(10, MILLISECONDS) + expectedOperation = new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('key', new 
BsonInt32(1))), + new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) indexNames = execute(createIndexesMethod, session, [new IndexModel(new Document('key', 1)), new IndexModel(new Document('key1', 1))], - new CreateIndexOptions().maxTime(10, MILLISECONDS)) + new CreateIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as CreateIndexesOperation then: @@ -1236,7 +1278,7 @@ class MongoCollectionSpecification extends Specification { def 'should validate the createIndexes data correctly'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: collection.createIndexes(null) @@ -1256,7 +1298,7 @@ class MongoCollectionSpecification extends Specification { def batchCursor = Stub(BatchCursor) def executor = new TestOperationExecutor([batchCursor, batchCursor, batchCursor]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def listIndexesMethod = collection.&listIndexes when: @@ -1277,12 +1319,12 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - execute(listIndexesMethod, session).batchSize(10).maxTime(10, MILLISECONDS).iterator() + execute(listIndexesMethod, session).batchSize(10).maxTime(100, MILLISECONDS).iterator() operation = executor.getReadOperation() as ListIndexesOperation then: expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).batchSize(10) - .maxTime(10, MILLISECONDS).retryReads(true)) + .retryReads(true)) executor.getClientSession() == session where: @@ -1293,7 +1335,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def dropIndexMethod = collection.&dropIndex when: @@ -1316,8 +1358,8 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - expectedOperation = expectedOperation.maxTime(10, MILLISECONDS) - execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(10, MILLISECONDS)) + expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED) + execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as DropIndexOperation then: @@ -1332,7 +1374,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) def dropIndexesMethod = collection.&dropIndexes @@ -1345,8 +1387,8 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - expectedOperation = 
expectedOperation.maxTime(10, MILLISECONDS) - execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(10, MILLISECONDS)) + expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) + execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as DropIndexOperation then: @@ -1361,7 +1403,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def newNamespace = new MongoNamespace(namespace.getDatabaseName(), 'newName') def renameCollectionOptions = new RenameCollectionOptions().dropTarget(dropTarget) def expectedOperation = new RenameCollectionOperation(namespace, newNamespace, ACKNOWLEDGED) @@ -1392,7 +1434,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([acknowledged(INSERT, 1, 0, [], [])]) def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry) def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def document = new ImmutableDocument(['a': 1]) when: @@ -1414,7 +1456,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([null]) def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry) def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def document = new ImmutableDocument(['a': 1]) when: @@ -1434,7 +1476,8 @@ class MongoCollectionSpecification extends Specification { def 'should validate the client session correctly'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, + Stub(OperationExecutor)) when: collection.aggregate(null, [Document.parse('{$match:{}}')]) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy index 81cbad9f34f..e702dd5e276 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy @@ -44,6 +44,9 @@ import org.bson.codecs.UuidCodec import org.bson.codecs.ValueCodecProvider import spock.lang.Specification +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.primaryPreferred @@ -66,7 +69,7 @@ class MongoDatabaseSpecification extends Specification { def 'should throw IllegalArgumentException if name 
is invalid'() { when: new MongoDatabaseImpl('a.b', codecRegistry, readPreference, writeConcern, false, false, readConcern, - JAVA_LEGACY, null, new TestOperationExecutor([])) + JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) then: thrown(IllegalArgumentException) @@ -75,7 +78,7 @@ class MongoDatabaseSpecification extends Specification { def 'should throw IllegalArgumentException from getCollection if collectionName is invalid'() { given: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern, - JAVA_LEGACY, null, new TestOperationExecutor([])) + JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) when: database.getCollection('') @@ -87,7 +90,7 @@ class MongoDatabaseSpecification extends Specification { def 'should return the correct name from getName'() { given: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern, - JAVA_LEGACY, null, new TestOperationExecutor([])) + JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) expect: database.getName() == name @@ -100,13 +103,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, true, readConcern, - C_SHARP_LEGACY, null, executor) + C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor) .withCodecRegistry(newCodecRegistry) then: (database.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY expect database, isTheSameAs(new MongoDatabaseImpl(name, database.getCodecRegistry(), readPreference, writeConcern, - false, true, readConcern, C_SHARP_LEGACY, null, executor)) + false, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withReadPreference'() { @@ -116,13 +119,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) .withReadPreference(newReadPreference) then: database.getReadPreference() == newReadPreference expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, newReadPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor)) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withWriteConcern'() { @@ -132,13 +135,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) .withWriteConcern(newWriteConcern) then: database.getWriteConcern() == newWriteConcern expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, newWriteConcern, false, false, - readConcern, JAVA_LEGACY, null, executor)) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) } def 'should behave correctly when using withReadConcern'() { @@ -148,13 +151,34 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) 
.withReadConcern(newReadConcern) then: database.getReadConcern() == newReadConcern expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - newReadConcern, JAVA_LEGACY, null, executor)) + newReadConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withTimeout'() { + given: + def executor = new TestOperationExecutor([]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + def newDatabase = database.withTimeout(10_000, TimeUnit.MILLISECONDS) + + then: + newDatabase.getTimeout(TimeUnit.MILLISECONDS) == 10_000 + expect newDatabase, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS.withTimeout(10_000, TimeUnit.MILLISECONDS), executor)) + + when: + database.withTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) } def 'should be able to executeCommand correctly'() { @@ -162,42 +186,38 @@ class MongoDatabaseSpecification extends Specification { def command = new BsonDocument('command', new BsonInt32(1)) def executor = new TestOperationExecutor([null, null, null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def runCommandMethod = database.&runCommand when: execute(runCommandMethod, session, command) - def operation = executor.getReadOperation() as CommandReadOperation + executor.getReadOperation() as CommandReadOperation then: - operation.command == command executor.getClientSession() == session executor.getReadPreference() == primary() when: execute(runCommandMethod, session, command, primaryPreferred()) - operation = executor.getReadOperation() as CommandReadOperation + executor.getReadOperation() as CommandReadOperation then: - operation.command == command executor.getClientSession() == session executor.getReadPreference() == primaryPreferred() when: execute(runCommandMethod, session, command, BsonDocument) - operation = executor.getReadOperation() as CommandReadOperation + executor.getReadOperation() as CommandReadOperation then: - operation.command == command executor.getClientSession() == session executor.getReadPreference() == primary() when: execute(runCommandMethod, session, command, primaryPreferred(), BsonDocument) - operation = executor.getReadOperation() as CommandReadOperation + executor.getReadOperation() as CommandReadOperation then: - operation.command == command executor.getClientSession() == session executor.getReadPreference() == primaryPreferred() @@ -209,7 +229,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def dropMethod = database.&drop when: @@ -228,7 +248,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, 
TIMEOUT_SETTINGS, executor) def listCollectionsMethod = database.&listCollections def listCollectionNamesMethod = database.&listCollectionNames @@ -237,14 +257,14 @@ class MongoDatabaseSpecification extends Specification { then: expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false, - Document, codecRegistry, primary(), executor, false)) + Document, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS)) when: listCollectionIterable = execute(listCollectionsMethod, session, BsonDocument) then: expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false, - BsonDocument, codecRegistry, primary(), executor, false)) + BsonDocument, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS)) when: def listCollectionNamesIterable = execute(listCollectionNamesMethod, session) @@ -252,7 +272,7 @@ class MongoDatabaseSpecification extends Specification { then: // `listCollectionNamesIterable` is an instance of a `ListCollectionNamesIterableImpl`, so have to get the wrapped iterable from it expect listCollectionNamesIterable.getWrapped(), isTheSameAs(new ListCollectionsIterableImpl<>(session, name, - true, BsonDocument, codecRegistry, primary(), executor, false)) + true, BsonDocument, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS)) where: session << [null, Stub(ClientSession)] @@ -263,7 +283,7 @@ class MongoDatabaseSpecification extends Specification { def collectionName = 'collectionName' def executor = new TestOperationExecutor([null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def createCollectionMethod = database.&createCollection when: @@ -314,7 +334,7 @@ class MongoDatabaseSpecification extends Specification { def writeConcern = WriteConcern.JOURNALED def executor = new TestOperationExecutor([null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def createViewMethod = database.&createView when: @@ -344,7 +364,7 @@ class MongoDatabaseSpecification extends Specification { def viewName = 'view1' def viewOn = 'col1' def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: database.createView(viewName, viewOn, null) @@ -364,7 +384,7 @@ class MongoDatabaseSpecification extends Specification { def executor = new TestOperationExecutor([]) def namespace = new MongoNamespace(name, 'ignored') def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def watchMethod = database.&watch when: @@ -372,7 +392,7 @@ class MongoDatabaseSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, - readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false), + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) when: @@ -381,7 +401,7 @@ class MongoDatabaseSpecification 
extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], Document, - ChangeStreamLevel.DATABASE, false), ['codec']) + ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) when: changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) @@ -389,7 +409,7 @@ class MongoDatabaseSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, - ChangeStreamLevel.DATABASE, false), ['codec']) + ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) where: session << [null, Stub(ClientSession)] @@ -399,7 +419,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: database.watch((Class) null) @@ -418,7 +438,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def aggregateMethod = database.&aggregate when: @@ -427,7 +447,7 @@ class MongoDatabaseSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, [], AggregationLevel.DATABASE, - false), ['codec']) + false, TIMEOUT_SETTINGS), ['codec']) when: aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)]) @@ -435,7 +455,7 @@ class MongoDatabaseSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)], - AggregationLevel.DATABASE, false), ['codec']) + AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) when: aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument) @@ -443,7 +463,7 @@ class MongoDatabaseSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, BsonDocument, codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)], - AggregationLevel.DATABASE, false), ['codec']) + AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) where: session << [null, Stub(ClientSession)] @@ -453,7 +473,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) when: database.aggregate(null, []) @@ -478,7 +498,7 @@ class MongoDatabaseSpecification extends Specification { given: def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new 
BsonValueCodecProvider()]) def database = new MongoDatabaseImpl('databaseName', codecRegistry, secondary(), WriteConcern.MAJORITY, true, true, - ReadConcern.MAJORITY, JAVA_LEGACY, null, new TestOperationExecutor([])) + ReadConcern.MAJORITY, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) when: def collection = database.getCollection('collectionName') @@ -489,14 +509,14 @@ class MongoDatabaseSpecification extends Specification { where: expectedCollection = new MongoCollectionImpl(new MongoNamespace('databaseName', 'collectionName'), Document, fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]), secondary(), - WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null, + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) } def 'should validate the client session correctly'() { given: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, - false, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + false, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: database.createCollection(null, 'newColl') diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java index a605d6542e7..28206e1be26 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java @@ -19,6 +19,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.lang.Nullable; @@ -68,6 +69,16 @@ public T execute(final WriteOperation operation, final ReadConcern readCo return getResponse(); } + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) { + return this; + } + + @Override + public TimeoutSettings getTimeoutSettings() { + throw new UnsupportedOperationException("Not supported"); + } + @SuppressWarnings("unchecked") private T getResponse() { Object response = responses.remove(0); diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java new file mode 100644 index 00000000000..c3569624414 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java @@ -0,0 +1,192 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.internal.time.Timeout; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.longThat; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +class TimeoutHelperTest { + + private static final String TIMEOUT_ERROR_MESSAGE = "message"; + + @Test + void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsNull() { + //given + MongoCollection collection = mock(MongoCollection.class); + + //when + MongoCollection result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, null); + + //then + assertEquals(collection, result); + } + + @Test + void shouldNotSetRemainingTimeoutDatabaseWhenTimeoutIsNull() { + //given + MongoDatabase database = mock(MongoDatabase.class); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, null); + + //then + assertEquals(database, result); + } + + @Test + void shouldSetRemainingTimeoutOnCollectionWhenTimeoutIsInfinite() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + + //when + MongoCollection result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()); + + //then + assertEquals(collectionWithTimeout, result); + verify(collection).withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldNotSetRemainingTimeoutOnDatabaseWhenTimeoutIsInfinite() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()); + + //then + assertEquals(databaseWithTimeout, result); + verify(database).withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldSetRemainingTimeoutOnCollectionWhenTimeout() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoCollection result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout); + + //then + verify(collection).withTimeout(longThat(remaining -> 
remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(collectionWithTimeout, result); + } + + @Test + void shouldSetRemainingTimeoutOnDatabaseWhenTimeout() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout); + + //then + verify(database).withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(databaseWithTimeout, result); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout)); + + //then + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + verifyNoInteractions(collection); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout)); + + //then + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + verifyNoInteractions(database); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout)); + + //then + + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout)); + + //then + verifyNoInteractions(database); + } + +} From a461dbabc338fefb8e003c1b30112174a3e99791 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Thu, 25 Jul 2024 00:07:59 -0700 Subject: [PATCH 49/90] Add unified, legacy, and prose tests for CSFLE multi-KMS support. 
(#1424) JAVA-5275 --------- Co-authored-by: Valentin Kovalenko --- .evergreen/.evg.yml | 4 + .../run-csfle-tests-with-mongocryptd.sh | 30 +- .evergreen/run-tests.sh | 46 +- .../com/mongodb/AutoEncryptionSettings.java | 14 +- .../com/mongodb/ClientEncryptionSettings.java | 12 +- .../client/model/vault/DataKeyOptions.java | 10 +- .../model/vault/RewrapManyDataKeyOptions.java | 10 +- .../legacy/namedKMS.json | 197 +++ .../namedKMS-createDataKey.json | 396 +++++ .../namedKMS-explicit.json | 130 ++ .../namedKMS-rewrapManyDataKey.json | 1385 +++++++++++++++++ ...ptionAwsCredentialFromEnvironmentTest.java | 85 + ...bstractClientSideEncryptionKmsTlsTest.java | 49 +- .../AbstractClientSideEncryptionTest.java | 1 + .../UnifiedClientEncryptionHelper.java | 115 +- .../mongodb/client/unified/UnifiedTest.java | 4 + 16 files changed, 2411 insertions(+), 77 deletions(-) create mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/namedKMS.json create mode 100644 driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-createDataKey.json create mode 100644 driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-explicit.json create mode 100644 driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-rewrapManyDataKey.json diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml index 97a7545d60d..2499bc884df 100644 --- a/.evergreen/.evg.yml +++ b/.evergreen/.evg.yml @@ -265,6 +265,8 @@ functions: env: AWS_ACCESS_KEY_ID: ${aws_access_key_id} AWS_SECRET_ACCESS_KEY: ${aws_secret_access_key} + AWS_ACCESS_KEY_ID_AWS_KMS_NAMED: ${aws_access_key_id_2} + AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED: ${aws_secret_access_key_2} AWS_DEFAULT_REGION: us-east-1 AZURE_TENANT_ID: ${azure_tenant_id} AZURE_CLIENT_ID: ${azure_client_id} @@ -709,6 +711,8 @@ functions: env: AWS_ACCESS_KEY_ID: ${aws_access_key_id} AWS_SECRET_ACCESS_KEY: ${aws_secret_access_key} + AWS_ACCESS_KEY_ID_AWS_KMS_NAMED: ${aws_access_key_id_2} + AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED: ${aws_secret_access_key_2} AWS_DEFAULT_REGION: us-east-1 AZURE_TENANT_ID: ${azure_tenant_id} AZURE_CLIENT_ID: ${azure_client_id} diff --git a/.evergreen/run-csfle-tests-with-mongocryptd.sh b/.evergreen/run-csfle-tests-with-mongocryptd.sh index 7927ec5eb85..c9733e58a8a 100755 --- a/.evergreen/run-csfle-tests-with-mongocryptd.sh +++ b/.evergreen/run-csfle-tests-with-mongocryptd.sh @@ -4,20 +4,22 @@ set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: -# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) -# JAVA_VERSION Set the version of java to be used. 
Java versions can be set from the java toolchain /opt/java -# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption -# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption -# AWS_TEMP_ACCESS_KEY_ID The temporary AWS access key identifier for client-side encryption -# AWS_TEMP_SECRET_ACCESS_KEY The temporary AWS secret access key for client-side encryption -# AWS_TEMP_SESSION_TOKEN The temporary AWS session token for client-side encryption -# AZURE_TENANT_ID The Azure tenant identifier for client-side encryption -# AZURE_CLIENT_ID The Azure client identifier for client-side encryption -# AZURE_CLIENT_SECRET The Azure client secret for client-side encryption -# GCP_EMAIL The GCP email for client-side encryption -# GCP_PRIVATE_KEY The GCP private key for client-side encryption -# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for integration tests -# AZUREKMS_KEY_NAME The Azure key name endpoint for integration tests +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# JAVA_VERSION Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption +# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption +# AWS_ACCESS_KEY_ID_AWS_KMS_NAMED The AWS access key identifier for client-side encryption's named KMS provider. +# AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED The AWS secret access key for client-side encryption's named KMS provider. +# AWS_TEMP_ACCESS_KEY_ID The temporary AWS access key identifier for client-side encryption +# AWS_TEMP_SECRET_ACCESS_KEY The temporary AWS secret access key for client-side encryption +# AWS_TEMP_SESSION_TOKEN The temporary AWS session token for client-side encryption +# AZURE_TENANT_ID The Azure tenant identifier for client-side encryption +# AZURE_CLIENT_ID The Azure client identifier for client-side encryption +# AZURE_CLIENT_SECRET The Azure client secret for client-side encryption +# GCP_EMAIL The GCP email for client-side encryption +# GCP_PRIVATE_KEY The GCP private key for client-side encryption +# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for integration tests +# AZUREKMS_KEY_NAME The Azure key name endpoint for integration tests MONGODB_URI=${MONGODB_URI:-} diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 06a31098177..49390e88d26 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -4,28 +4,30 @@ set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: -# AUTH Set to enable authentication. Values are: "auth" / "noauth" (default) -# SSL Set to enable SSL. Values are "ssl" / "nossl" (default) -# NETTY_SSL_PROVIDER The Netty TLS/SSL protocol provider. Ignored unless SSL is "ssl" and STREAM_TYPE is "netty". Values are "JDK", "OPENSSL", null (a.k.a. "" or '') (default). -# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) -# TOPOLOGY Allows you to modify variables and the MONGODB_URI based on test topology -# Supported values: "server", "replica_set", "sharded_cluster" -# COMPRESSOR Set to enable compression. Values are "snappy" and "zlib" (default is no compression) -# STREAM_TYPE Set the stream type. Values are "nio2" or "netty". Defaults to "nio2". -# JDK Set the version of java to be used. 
Java versions can be set from the java toolchain /opt/java -# SLOW_TESTS_ONLY Set to true to only run the slow tests -# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption -# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption -# AWS_TEMP_ACCESS_KEY_ID The temporary AWS access key identifier for client-side encryption -# AWS_TEMP_SECRET_ACCESS_KEY The temporary AWS secret access key for client-side encryption -# AWS_TEMP_SESSION_TOKEN The temporary AWS session token for client-side encryption -# AZURE_TENANT_ID The Azure tenant identifier for client-side encryption -# AZURE_CLIENT_ID The Azure client identifier for client-side encryption -# AZURE_CLIENT_SECRET The Azure client secret for client-side encryption -# GCP_EMAIL The GCP email for client-side encryption -# GCP_PRIVATE_KEY The GCP private key for client-side encryption -# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for integration tests -# AZUREKMS_KEY_NAME The Azure key name endpoint for integration tests +# AUTH Set to enable authentication. Values are: "auth" / "noauth" (default) +# SSL Set to enable SSL. Values are "ssl" / "nossl" (default) +# NETTY_SSL_PROVIDER The Netty TLS/SSL protocol provider. Ignored unless SSL is "ssl" and STREAM_TYPE is "netty". Values are "JDK", "OPENSSL", null (a.k.a. "" or '') (default). +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# TOPOLOGY Allows you to modify variables and the MONGODB_URI based on test topology +# Supported values: "server", "replica_set", "sharded_cluster" +# COMPRESSOR Set to enable compression. Values are "snappy" and "zlib" (default is no compression) +# STREAM_TYPE Set the stream type. Values are "nio2" or "netty". Defaults to "nio2". +# JDK Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# SLOW_TESTS_ONLY Set to true to only run the slow tests +# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption +# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption +# AWS_ACCESS_KEY_ID_AWS_KMS_NAMED The AWS access key identifier for client-side encryption's named KMS provider. +# AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED The AWS secret access key for client-side encryption's named KMS provider. 
+# AWS_TEMP_ACCESS_KEY_ID The temporary AWS access key identifier for client-side encryption +# AWS_TEMP_SECRET_ACCESS_KEY The temporary AWS secret access key for client-side encryption +# AWS_TEMP_SESSION_TOKEN The temporary AWS session token for client-side encryption +# AZURE_TENANT_ID The Azure tenant identifier for client-side encryption +# AZURE_CLIENT_ID The Azure client identifier for client-side encryption +# AZURE_CLIENT_SECRET The Azure client secret for client-side encryption +# GCP_EMAIL The GCP email for client-side encryption +# GCP_PRIVATE_KEY The GCP private key for client-side encryption +# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for integration tests +# AZUREKMS_KEY_NAME The Azure key name endpoint for integration tests AUTH=${AUTH:-noauth} SSL=${SSL:-nossl} diff --git a/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java b/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java index 1e2be618150..904f148e891 100644 --- a/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java +++ b/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java @@ -293,9 +293,15 @@ public String getKeyVaultNamespace() { /** * Gets the map of KMS provider properties. * + *

<p>
+     * Multiple KMS providers can be specified within this map. Each KMS provider is identified by a unique key.
+     * Keys are formatted as either {@code "KMS provider type"} or {@code "KMS provider type:KMS provider name"} (e.g., "aws" or "aws:myname").
+     * The KMS provider name must only contain alphanumeric characters (a-z, A-Z, 0-9), underscores (_), and must not be empty.
      * <p>
-     * Multiple KMS providers may be specified. The following KMS providers are supported: "aws", "azure", "gcp" and "local". The
-     * kmsProviders map values differ by provider:
+     * Supported KMS provider types include "aws", "azure", "gcp", and "local". The provider name is optional and allows
+     * for the configuration of multiple providers of the same type under different names (e.g., "aws:name1" and
+     * "aws:name2" could represent different AWS accounts).
      * <p>
+     * The kmsProviders map values differ by provider type. The following properties are supported for each provider type:
      * <p>
      * For "aws", the properties are:
@@ -335,7 +341,6 @@ public String getKeyVaultNamespace() {
      * <ul>
      *     <li>key: byte[] of length 96, the local key</li>
      * </ul>
-     *
      * <p>
      * It is also permitted for the value of a kms provider to be an empty map, in which case the driver will first
      * <ol>
@@ -343,7 +348,8 @@ public String getKeyVaultNamespace() {
      *     <li>use the {@link Supplier} configured in {@link #getKmsProviderPropertySuppliers()} to obtain a non-empty map</li>
      *     <li>attempt to obtain the properties from the environment</li>
      * </ol>
-     *
+     * However, KMS providers containing a name (e.g., "aws:myname") do not support dynamically obtaining KMS properties from the {@link Supplier}
+     * or environment.
      * @return map of KMS provider properties
      * @see #getKmsProviderPropertySuppliers()
      */
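As a rough illustration of the key format documented above (not part of the change itself): the named provider "aws:name1" and the key vault namespace are assumptions, and the credentials are read from the *_AWS_KMS_NAMED environment variables introduced in the .evergreen scripts earlier in this patch.

    import com.mongodb.AutoEncryptionSettings;

    import java.util.HashMap;
    import java.util.Map;

    public final class NamedKmsProviderSketch {
        public static void main(final String[] args) {
            // Two providers of the same type: the default "aws" entry and a named "aws:name1" entry.
            Map<String, Object> awsDefault = new HashMap<>();
            awsDefault.put("accessKeyId", System.getenv("AWS_ACCESS_KEY_ID"));
            awsDefault.put("secretAccessKey", System.getenv("AWS_SECRET_ACCESS_KEY"));

            Map<String, Object> awsNamed = new HashMap<>();
            awsNamed.put("accessKeyId", System.getenv("AWS_ACCESS_KEY_ID_AWS_KMS_NAMED"));
            awsNamed.put("secretAccessKey", System.getenv("AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED"));

            Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
            kmsProviders.put("aws", awsDefault);
            kmsProviders.put("aws:name1", awsNamed);      // "name1" is an illustrative provider name

            AutoEncryptionSettings settings = AutoEncryptionSettings.builder()
                    .keyVaultNamespace("keyvault.datakeys")   // illustrative key vault namespace
                    .kmsProviders(kmsProviders)
                    .build();
            System.out.println(settings.getKmsProviders().keySet());
        }
    }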

diff --git a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
index ee9b88817e7..d2188b3d329 100644
--- a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
+++ b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
@@ -216,9 +216,15 @@ public String getKeyVaultNamespace() {
     /**
      * Gets the map of KMS provider properties.
      *
+     * <p>
+     * Multiple KMS providers can be specified within this map. Each KMS provider is identified by a unique key.
+     * Keys are formatted as either {@code "KMS provider type"} or {@code "KMS provider type:KMS provider name"} (e.g., "aws" or "aws:myname").
+     * The KMS provider name must only contain alphanumeric characters (a-z, A-Z, 0-9), underscores (_), and must not be empty.
      * <p>
-     * Multiple KMS providers may be specified. The following KMS providers are supported: "aws", "azure", "gcp" and "local". The
-     * kmsProviders map values differ by provider:
+     * Supported KMS provider types include "aws", "azure", "gcp", and "local". The provider name is optional and allows
+     * for the configuration of multiple providers of the same type under different names (e.g., "aws:name1" and
+     * "aws:name2" could represent different AWS accounts).
      * <p>
+     * The kmsProviders map values differ by provider type. The following properties are supported for each provider type:
      * <p>
      * For "aws", the properties are:
@@ -265,6 +271,8 @@ public String getKeyVaultNamespace() {
      *     <li>use the {@link Supplier} configured in {@link #getKmsProviderPropertySuppliers()} to obtain a non-empty map</li>
      *     <li>attempt to obtain the properties from the environment</li>
      * </ol>
+     * However, KMS providers containing a name (e.g., "aws:myname") do not support dynamically obtaining KMS properties from the {@link Supplier}
+     * or environment.
      * @return map of KMS provider properties
      * @see #getKmsProviderPropertySuppliers()
      */
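The same naming scheme could be applied to explicit encryption settings. A minimal sketch, assuming a locally generated 96-byte key registered under the named provider "local:name2" (the name echoes the namedKMS fixtures added below, and the key vault namespace is illustrative):

    import com.mongodb.ClientEncryptionSettings;
    import com.mongodb.MongoClientSettings;

    import java.security.SecureRandom;
    import java.util.HashMap;
    import java.util.Map;

    public final class NamedLocalKmsSketch {
        public static void main(final String[] args) {
            byte[] localMasterKey = new byte[96];      // a "local" KMS key must be exactly 96 bytes
            new SecureRandom().nextBytes(localMasterKey);

            Map<String, Object> local = new HashMap<>();
            local.put("key", localMasterKey);

            Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
            kmsProviders.put("local:name2", local);    // named "local" provider, as in the namedKMS fixtures

            ClientEncryptionSettings settings = ClientEncryptionSettings.builder()
                    .keyVaultMongoClientSettings(MongoClientSettings.builder().build())  // defaults to localhost
                    .keyVaultNamespace("keyvault.datakeys")                              // illustrative namespace
                    .kmsProviders(kmsProviders)
                    .build();
            System.out.println(settings.getKmsProviders().keySet());
        }
    }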

diff --git a/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java
index e9b60dc3771..14a52a39904 100644
--- a/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java
@@ -95,14 +95,14 @@ public List getKeyAltNames() {
      * The masterKey identifies a KMS-specific key used to encrypt the new data key.
      * <p>
      * </p>
-     * If the kmsProvider is "aws" the master key is required and must contain the following fields:
+     * If the kmsProvider type is "aws" the master key is required and must contain the following fields:
      * <p>
      * <ul>
      *     <li>region: a String containing the AWS region in which to locate the master key</li>
      *     <li>key: a String containing the Amazon Resource Name (ARN) to the AWS customer master key</li>
      * </ul>
      * <p>
-     * If the kmsProvider is "azure" the master key is required and must contain the following fields:
+     * If the kmsProvider type is "azure" the master key is required and must contain the following fields:
      * <p>
      * <ul>
      *     <li>keyVaultEndpoint: a String with the host name and an optional port. Example: "example.vault.azure.net".</li>
@@ -110,7 +110,7 @@ public List getKeyAltNames() {
      *     <li>keyVersion: an optional String, the specific version of the named key, defaults to using the key's primary version.</li>
      * </ul>
      * <p>
-     * If the kmsProvider is "gcp" the master key is required and must contain the following fields:
+     * If the kmsProvider type is "gcp" the master key is required and must contain the following fields:
      * <p>
      * <ul>
      *     <li>projectId: a String</li>
@@ -121,7 +121,7 @@ public List getKeyAltNames() {
      *     <li>endpoint: an optional String, with the host with optional port. Defaults to "cloudkms.googleapis.com".</li>
      * </ul>
      * <p>
-     * If the kmsProvider is "kmip" the master key is required and must contain the following fields:
+     * If the kmsProvider type is "kmip" the master key is required and must contain the following fields:
      * <p>
      * <ul>
      *     <li>keyId: optional String, keyId is the KMIP Unique Identifier to a 96 byte KMIP Secret Data managed object. If keyId is
@@ -133,7 +133,7 @@ public List getKeyAltNames() {
      *     to false.</li>
      * </ul>
      * <p>
-     * If the kmsProvider is "local" the masterKey is not applicable.
+     * If the kmsProvider type is "local" the masterKey is not applicable.
      * <p>
      * @return the master key document
      */
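To tie the masterKey document described above to a named provider, one might create a data key as sketched below; the provider name "aws:name1" and the key ARN are placeholders, and the ClientEncryption instance is assumed to have been built from settings whose kmsProviders map contains a matching "aws:name1" entry.

    import com.mongodb.client.model.vault.DataKeyOptions;
    import com.mongodb.client.vault.ClientEncryption;
    import org.bson.BsonBinary;
    import org.bson.BsonDocument;
    import org.bson.BsonString;

    public final class NamedAwsDataKeySketch {
        // Creates a data key encrypted by the named "aws:name1" provider; region and key ARN are placeholders.
        static BsonBinary createDataKey(final ClientEncryption clientEncryption) {
            BsonDocument masterKey = new BsonDocument("region", new BsonString("us-east-1"))
                    .append("key", new BsonString("arn:aws:kms:us-east-1:000000000000:key/EXAMPLE-KEY-ID"));
            return clientEncryption.createDataKey("aws:name1", new DataKeyOptions().masterKey(masterKey));
        }
    }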

      *

      - * If the kmsProvider is "aws" the master key is required and must contain the following fields: + * If the kmsProvider type is "aws" the master key is required and must contain the following fields: *

      *
        *
      • region: a String containing the AWS region in which to locate the master key
      • *
      • key: a String containing the Amazon Resource Name (ARN) to the AWS customer master key
      • *
      *

      - * If the kmsProvider is "azure" the master key is required and must contain the following fields: + * If the kmsProvider type is "azure" the master key is required and must contain the following fields: *

      *
        *
      • keyVaultEndpoint: a String with the host name and an optional port. Example: "example.vault.azure.net".
      • @@ -88,7 +88,7 @@ public RewrapManyDataKeyOptions masterKey(final BsonDocument masterKey) { *
      • keyVersion: an optional String, the specific version of the named key, defaults to using the key's primary version.
      • *
      *

      - * If the kmsProvider is "gcp" the master key is required and must contain the following fields: + * If the kmsProvider type is "gcp" the master key is required and must contain the following fields: *

      *
        *
      • projectId: a String
      • @@ -99,7 +99,7 @@ public RewrapManyDataKeyOptions masterKey(final BsonDocument masterKey) { *
      • endpoint: an optional String, with the host with optional port. Defaults to "cloudkms.googleapis.com".
      • *
      *

      - * If the kmsProvider is "kmip" the master key is required and must contain the following fields: + * If the kmsProvider type is "kmip" the master key is required and must contain the following fields: *

      *
        *
      • keyId: optional String, keyId is the KMIP Unique Identifier to a 96 byte KMIP Secret Data managed object. If keyId is @@ -108,7 +108,7 @@ public RewrapManyDataKeyOptions masterKey(final BsonDocument masterKey) { * defaults to the required endpoint from the KMS providers map.
      • *
      *

      - * If the kmsProvider is "local" the masterKey is not applicable. + * If the kmsProvider type is "local" the masterKey is not applicable. *
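(Illustrative note, not part of the patch.) A minimal sketch of rewrapping existing data keys with a new master key shaped as described above, via ClientEncryption.rewrapManyDataKey. The filter, provider, region, and key ARN are assumed placeholder values, and clientEncryption is an already configured com.mongodb.client.vault.ClientEncryption instance.

    import com.mongodb.client.model.vault.RewrapManyDataKeyOptions;
    import com.mongodb.client.model.vault.RewrapManyDataKeyResult;
    import org.bson.BsonDocument;

    // Select the data keys to rewrap (here: every key currently wrapped by the "aws" provider).
    BsonDocument filter = BsonDocument.parse("{ 'masterKey.provider': 'aws' }");

    // Rewrap the matching keys with a different (placeholder) AWS customer master key.
    RewrapManyDataKeyResult result = clientEncryption.rewrapManyDataKey(filter,
            new RewrapManyDataKeyOptions()
                    .provider("aws")
                    .masterKey(BsonDocument.parse(
                            "{ region: 'us-east-1', key: 'arn:aws:kms:us-east-1:000000000000:key/other-example-key-id' }")));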

      * @return the master key document */ diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/namedKMS.json b/driver-core/src/test/resources/client-side-encryption/legacy/namedKMS.json new file mode 100644 index 00000000000..394a6ac5484 --- /dev/null +++ b/driver-core/src/test/resources/client-side-encryption/legacy/namedKMS.json @@ -0,0 +1,197 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local:name2": { + "key": { + "$binary": { + "base64": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} \ No newline at end of file diff --git 
a/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-createDataKey.json b/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-createDataKey.json new file mode 100644 index 00000000000..4d75e4cf51e --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-createDataKey.json @@ -0,0 +1,396 @@ +{ + "description": "namedKMS-createDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with named AWS KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws:name1", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named Azure KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure:name1", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure:name1", + 
"keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named GCP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp:name1", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named KMIP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named local KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local:name1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-explicit.json b/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-explicit.json new file mode 100644 index 00000000000..e28d7e8b303 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-explicit.json @@ -0,0 +1,130 @@ +{ + "description": "namedKMS-explicit", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + 
"keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name2" + ], + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "can explicitly encrypt with a named KMS provider", + "operations": [ + { + "name": "encrypt", + "object": "clientEncryption0", + "arguments": { + "value": "foobar", + "opts": { + "keyAltName": "local:name2", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "expectResult": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + } + ] + }, + { + "description": "can explicitly decrypt with a named KMS provider", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-rewrapManyDataKey.json b/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-rewrapManyDataKey.json new file mode 100644 index 00000000000..b3b9bd24777 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/client-side-encryption/namedKMS-rewrapManyDataKey.json @@ -0,0 +1,1385 @@ +{ + "description": "namedKMS-rewrapManyDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + }, + "local:name2": 
{ + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + }, + "aws:name2": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", + "subType": 
"00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip:name1", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local:name1" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap to aws:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name1", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + 
} + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to azure:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "opts": { + "provider": "azure:name1", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to gcp:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "opts": { + "provider": "gcp:name1", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, 
+ "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to kmip:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "opts": { + "provider": "kmip:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + 
"upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to local:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "opts": { + "provider": "local:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from local:name1 to 
local:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "opts": { + "provider": "local:name2" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name2" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from aws:name1 to aws:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name2", + "masterKey": { + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name2", + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java index 51a80e7739d..b5b6c7101b5 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java @@ -22,11 +22,14 @@ import com.mongodb.MongoConfigurationException; import com.mongodb.client.model.vault.DataKeyOptions; import 
com.mongodb.client.vault.ClientEncryption; +import com.mongodb.crypt.capi.MongoCryptException; import com.mongodb.lang.NonNull; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; import org.bson.BsonDocument; import org.bson.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -47,6 +50,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeFalse; import static org.junit.jupiter.api.Assumptions.assumeTrue; @@ -192,6 +196,87 @@ public void shouldThrowMongoConfigurationIfSupplierReturnsDoesSomethingUnexpecte } } + + /** + * This is a custom prose test to enhance coverage. + *

      + * This test specifically verifies the following part of the specification: + *

        + *
      • KMS providers that include a name (e.g., "aws:myname") do not support automatic credentials.
      • + *
      • Configuring a named KMS provider for automatic credentials will result in a runtime error from libmongocrypt.
      • + *
      + *

      + * Detailed specification reference: + * Client-Side Encryption Spec + */ + @Test + @DisplayName("Throw MongoCryptException when configured for automatic/on-demand credentials in ClientEncryptionSettings") + void shouldThrowMongoCryptExceptionWhenNamedKMSProviderUsesEmptyOnDemandCredentialsWithEncryptionSettings() { + assumeTrue(serverVersionAtLeast(4, 2)); + assumeTrue(isClientSideEncryptionTest()); + + Map> kmsProviders = new HashMap>() {{ + put("aws:name", new HashMap<>()); + }}; + + Map>> kmsProviderPropertySuppliers = new HashMap<>(); + kmsProviderPropertySuppliers.put("aws:name", () -> Assertions.fail("Supplier should not be called")); + + ClientEncryptionSettings settings = ClientEncryptionSettings.builder() + .keyVaultNamespace("test.datakeys") + .kmsProviders(kmsProviders) + .kmsProviderPropertySuppliers(kmsProviderPropertySuppliers) + .keyVaultMongoClientSettings(Fixture.getMongoClientSettings()) + .build(); + + MongoCryptException e = assertThrows(MongoCryptException.class, () -> { + try (ClientEncryption ignore = createClientEncryption(settings)) {//NOP + } + }); + assertTrue(e.getMessage().contains("On-demand credentials are not supported for named KMS providers.")); + } + + /** + * This is a custom prose tests to enhance coverage. + *

      + * This test specifically verifies the following part of the specification: + *

        + *
      • KMS providers that include a name (e.g., "aws:myname") do not support automatic credentials.
      • + *
      • Configuring a named KMS provider for automatic credentials will result in a runtime error from libmongocrypt.
      • + *
      + *

      + * Detailed specification reference: + * Client-Side Encryption Spec + */ + @Test + @DisplayName("Throw MongoCryptException when configured for automatic/on-demand credentials in AutoEncryptionSettings") + public void shouldThrowMongoCryptExceptionWhenNamedKMSProviderUsesEmptyOnDemandCredentialsWithAutoEncryptionSettings() { + assumeTrue(serverVersionAtLeast(4, 2)); + assumeTrue(isClientSideEncryptionTest()); + + Map> kmsProviders = new HashMap>() {{ + put("aws:name", new HashMap<>()); + }}; + + Map>> kmsProviderPropertySuppliers = new HashMap<>(); + kmsProviderPropertySuppliers.put("aws:name", () -> Assertions.fail("Supplier should not be called")); + + AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder() + .kmsProviders(kmsProviders) + .keyVaultNamespace("test.datakeys") + .build(); + + MongoCryptException e = assertThrows(MongoCryptException.class, () -> { + try (MongoClient ignore = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .build())) {//NOP + } + }); + assertTrue(e.getMessage().contains("On-demand credentials are not supported for named KMS providers.")); + } + + + @Test public void shouldIgnoreSupplierIfKmsProviderMapValueIsNotEmpty() { assumeTrue(serverVersionAtLeast(4, 2)); diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java index da400a206c2..e543319270e 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java @@ -46,7 +46,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; - public abstract class AbstractClientSideEncryptionKmsTlsTest { private static final String SYSTEM_PROPERTY_KEY = "org.mongodb.test.kms.tls.error.type"; @@ -93,6 +92,10 @@ static TlsErrorType fromSystemPropertyValue(final String value) { @NonNull public abstract ClientEncryption getClientEncryption(ClientEncryptionSettings settings); + /** + * See + * 10. KMS TLS Tests. + */ @Test public void testInvalidKmsCertificate() { assumeTrue(System.getProperties().containsKey(SYSTEM_PROPERTY_KEY)); @@ -120,6 +123,10 @@ public void testInvalidKmsCertificate() { } } + /** + * See + * 11. KMS TLS Options Tests. 
+ */ @Test() public void testThatCustomSslContextIsUsed() { assumeTrue(serverVersionAtLeast(4, 2)); @@ -131,10 +138,14 @@ public void testThatCustomSslContextIsUsed() { .keyVaultNamespace("keyvault.datakeys") .kmsProviders(kmsProviders) .kmsProviderSslContextMap(new HashMap() {{ - put("aws", getUntrustingSslContext()); - put("azure", getUntrustingSslContext()); - put("gcp", getUntrustingSslContext()); - put("kmip", getUntrustingSslContext()); + put("aws", getUntrustingSslContext("aws")); + put("aws:named", getUntrustingSslContext("aws:named")); + put("azure", getUntrustingSslContext("azure")); + put("azure:named", getUntrustingSslContext("azure:named")); + put("gcp", getUntrustingSslContext("gcp")); + put("gcp:named", getUntrustingSslContext("gcp:named")); + put("kmip", getUntrustingSslContext("kmip")); + put("kmip:named", getUntrustingSslContext("kmip:named")); }}) .build(); try (ClientEncryption clientEncryption = getClientEncryption(clientEncryptionSettings)) { @@ -144,7 +155,7 @@ public void testThatCustomSslContextIsUsed() { clientEncryption.createDataKey(curProvider, new DataKeyOptions().masterKey( BsonDocument.parse(getMasterKey(curProvider))))); while (e != null) { - if (e.getMessage().contains("Don't trust anything")) { + if (e.getMessage().contains("Don't trust " + curProvider)) { break outer; } e = e.getCause(); @@ -160,35 +171,56 @@ private HashMap> getKmsProviders() { put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); }}); + put("aws:named", new HashMap() {{ + put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); + put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); + }}); put("azure", new HashMap() {{ put("tenantId", getEnv("AZURE_TENANT_ID")); put("clientId", getEnv("AZURE_CLIENT_ID")); put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); put("identityPlatformEndpoint", "login.microsoftonline.com:443"); }}); + put("azure:named", new HashMap() {{ + put("tenantId", getEnv("AZURE_TENANT_ID")); + put("clientId", getEnv("AZURE_CLIENT_ID")); + put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); + put("identityPlatformEndpoint", "login.microsoftonline.com:443"); + }}); put("gcp", new HashMap() {{ put("email", getEnv("GCP_EMAIL")); put("privateKey", getEnv("GCP_PRIVATE_KEY")); put("endpoint", "oauth2.googleapis.com:443"); }}); + put("gcp:named", new HashMap() {{ + put("email", getEnv("GCP_EMAIL")); + put("privateKey", getEnv("GCP_PRIVATE_KEY")); + put("endpoint", "oauth2.googleapis.com:443"); + }}); put("kmip", new HashMap() {{ put("endpoint", "localhost:5698"); }}); + put("kmip:named", new HashMap() {{ + put("endpoint", "localhost:5698"); + }}); }}; } String getMasterKey(final String kmsProvider) { switch (kmsProvider) { case "aws": + case "aws:named": return "{" + "region: \"us-east-1\", " + "key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\"}"; case "azure": + case "azure:named": return "{" + " \"keyVaultEndpoint\": \"key-vault-csfle.vault.azure.net\"," + " \"keyName\": \"key-name-csfle\"" + "}"; case "gcp": + case "gcp:named": return "{" + " \"projectId\": \"devprod-drivers\"," + " \"location\": \"global\", " @@ -196,13 +228,14 @@ String getMasterKey(final String kmsProvider) { + " \"keyName\": \"key-name-csfle\"" + "}"; case "kmip": + case "kmip:named": return "{}"; default: throw new UnsupportedOperationException("Unsupported KMS provider: " + kmsProvider); } } - private SSLContext getUntrustingSslContext() { + private SSLContext getUntrustingSslContext(final String kmsProvider) { try { 
TrustManager untrustingTrustManager = new X509TrustManager() { public X509Certificate[] getAcceptedIssuers() { @@ -213,7 +246,7 @@ public void checkClientTrusted(final X509Certificate[] certs, final String authT } public void checkServerTrusted(final X509Certificate[] certs, final String authType) throws CertificateException { - throw new CertificateException("Don't trust anything"); + throw new CertificateException("Don't trust " + kmsProvider); } }; diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java index 25abafc65ee..87341a795ec 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java @@ -255,6 +255,7 @@ public void setUp() { kmsProviderMap.put("endpoint", getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698")); break; case "local": + case "local:name2": kmsProviderMap.put("key", kmsProviderOptions.getBinary("key").getData()); break; default: diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java index d7ac0450844..8e545841c6a 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java @@ -18,13 +18,17 @@ import com.mongodb.bulk.BulkWriteResult; import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyResult; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.lang.Nullable; import org.bson.BsonArray; +import org.bson.BsonBinary; import org.bson.BsonDocument; import org.bson.BsonInt32; +import org.bson.BsonString; import org.bson.BsonValue; import java.util.ArrayList; @@ -60,9 +64,14 @@ static Map> createKmsProvidersMap(final BsonDocument Map kmsProviderMap = new HashMap<>(); switch (kmsProviderKey) { case "aws": + case "aws:name1": setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_ACCESS_KEY_ID"); setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_SECRET_ACCESS_KEY"); break; + case "aws:name2": + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_ACCESS_KEY_ID_AWS_KMS_NAMED"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED"); + break; case "awsTemporary": setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_TEMP_ACCESS_KEY_ID"); setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_TEMP_SECRET_ACCESS_KEY"); @@ -73,20 +82,41 @@ static Map> createKmsProvidersMap(final BsonDocument setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_TEMP_SECRET_ACCESS_KEY"); break; case "azure": + case "azure:name1": setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "tenantId", "AZURE_TENANT_ID"); setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "clientId", "AZURE_CLIENT_ID"); setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, 
"clientSecret", "AZURE_CLIENT_SECRET"); break; case "gcp": + case "gcp:name1": setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "email", "GCP_EMAIL"); setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "privateKey", "GCP_PRIVATE_KEY"); break; case "kmip": - setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "endpoint", () -> - getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698")); + case "kmip:name1": + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + "endpoint", + () -> getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698"), + null); break; case "local": - setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "key", UnifiedClientEncryptionHelper::localKmsProviderKey); + case "local:name1": + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + "key", + UnifiedClientEncryptionHelper::localKmsProviderKey, + null); + break; + case "local:name2": + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + "key", + null, + () -> decodeLocalKmsProviderKey(kmsProviderOptions.getString("key").getValue())); break; default: throw new UnsupportedOperationException("Unsupported KMS provider: " + kmsProviderKey); @@ -97,29 +127,48 @@ static Map> createKmsProvidersMap(final BsonDocument } public static byte[] localKmsProviderKey() { - return Base64.getDecoder().decode( - "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" - + "GJkTXVyZG9uSjFk"); + return decodeLocalKmsProviderKey("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" + + "GJkTXVyZG9uSjFk"); + } + + public static byte[] decodeLocalKmsProviderKey(final String key) { + return Base64.getDecoder().decode(key); } + private static void setKmsProviderProperty(final Map kmsProviderMap, final BsonDocument kmsProviderOptions, final String key, final String propertyName) { - setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, key, () -> { - if (getEnv(propertyName) != null) { - return getEnv(propertyName); - } - throw new UnsupportedOperationException("Missing system property for: " + key); - }); + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + key, + () -> { + if (getEnv(propertyName) != null) { + return getEnv(propertyName); + } + throw new UnsupportedOperationException("Missing system property for: " + key); + }, + null); } private static void setKmsProviderProperty(final Map kmsProviderMap, - final BsonDocument kmsProviderOptions, final String key, final Supplier propertySupplier) { + final BsonDocument kmsProviderOptions, final String key, + @Nullable final Supplier placeholderPropertySupplier, + @Nullable final Supplier explicitPropertySupplier) { if (kmsProviderOptions.containsKey(key)) { - if (kmsProviderOptions.get(key).equals(PLACEHOLDER)) { - kmsProviderMap.put(key, propertySupplier.get()); - } else { - throw new UnsupportedOperationException("Missing key handler for: " + key + " :: " + kmsProviderOptions.toJson()); + boolean isPlaceholderValue = kmsProviderOptions.get(key).equals(PLACEHOLDER); + if (isPlaceholderValue) { + if (placeholderPropertySupplier == null) { + throw new UnsupportedOperationException("Placeholder is not supported for: " + key + " :: " + kmsProviderOptions.toJson()); + } + kmsProviderMap.put(key, placeholderPropertySupplier.get()); + return; } + + if (explicitPropertySupplier == null) { + throw new UnsupportedOperationException("Non-placeholder value is not supported for: " + key + " :: " + 
kmsProviderOptions.toJson()); + } + kmsProviderMap.put(key, explicitPropertySupplier.get()); } } @@ -207,6 +256,38 @@ OperationResult executeRewrapManyDataKey(final BsonDocument operation) { return resultOf(() -> toExpected(clientEncryption.rewrapManyDataKey(filter, rewrapManyDataKeyOptions))); } + OperationResult executeEncrypt(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments"); + BsonDocument options = arguments.getDocument("opts"); + + BsonString value = arguments.getString("value"); + String algorithm = options.remove("algorithm") + .asString() + .getValue(); + + EncryptOptions encryptOptions = new EncryptOptions(algorithm); + for (String key : options.keySet()) { + switch (key) { + case "keyAltName": + encryptOptions.keyAltName(options.getString("keyAltName").getValue()); + break; + default: + throw new UnsupportedOperationException("Missing key handler for: " + key + " :: " + options.toJson()); + } + } + return resultOf(() -> clientEncryption.encrypt(value, encryptOptions)); + } + + + OperationResult executeDecrypt(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments"); + BsonBinary value = arguments.getBinary("value"); + + return resultOf(() -> clientEncryption.decrypt(value)); + } + private BsonDocument toExpected(final DeleteResult result) { if (result.wasAcknowledged()) { return new BsonDocument("deletedCount", new BsonInt32(toIntExact(result.getDeletedCount()))); diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java index ae7ad39a2f5..58ad07034ec 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java @@ -574,6 +574,10 @@ private OperationResult executeOperation(final UnifiedTestContext context, final return clientEncryptionHelper.executeGetKeyByAltName(operation); case "rewrapManyDataKey": return clientEncryptionHelper.executeRewrapManyDataKey(operation); + case "encrypt": + return clientEncryptionHelper.executeEncrypt(operation); + case "decrypt": + return clientEncryptionHelper.executeDecrypt(operation); default: throw new UnsupportedOperationException("Unsupported test operation: " + name); } From 14fc2fa033ad270bf30c6520e8475aa577998ae7 Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Fri, 26 Jul 2024 16:13:49 -0400 Subject: [PATCH 50/90] Support any number of Document Sequence Sections in CommandMessage#getCommandDocument (#1456) JAVA-5536 --- .../connection/ByteBufBsonDocument.java | 47 ++++------ .../internal/connection/CommandMessage.java | 92 +++++++++++++++---- 2 files changed, 90 insertions(+), 49 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java index 5ab265c2bc8..70ed10a75a8 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java +++ b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java @@ -53,38 +53,31 @@ final class ByteBufBsonDocument extends BsonDocument { private final transient ByteBuf byteBuf; - static List createList(final ByteBufferBsonOutput 
bsonOutput, final int startPosition) { - List duplicateByteBuffers = bsonOutput.getByteBuffers(); - CompositeByteBuf outputByteBuf = new CompositeByteBuf(duplicateByteBuffers); - outputByteBuf.position(startPosition); + /** + * Create a list of ByteBufBsonDocument from a buffer positioned at the start of the first document of an OP_MSG Section + * of type Document Sequence (Kind 1). + *

      + * The provided buffer will be positioned at the end of the section upon normal completion of the method + */ + static List createList(final ByteBuf outputByteBuf) { List documents = new ArrayList<>(); - int curDocumentStartPosition = startPosition; while (outputByteBuf.hasRemaining()) { - int documentSizeInBytes = outputByteBuf.getInt(); - ByteBuf slice = outputByteBuf.duplicate(); - slice.position(curDocumentStartPosition); - slice.limit(curDocumentStartPosition + documentSizeInBytes); - documents.add(new ByteBufBsonDocument(slice)); - curDocumentStartPosition += documentSizeInBytes; - outputByteBuf.position(outputByteBuf.position() + documentSizeInBytes - 4); - } - for (ByteBuf byteBuffer : duplicateByteBuffers) { - byteBuffer.release(); + ByteBufBsonDocument curDocument = createOne(outputByteBuf); + documents.add(curDocument); } return documents; } - static ByteBufBsonDocument createOne(final ByteBufferBsonOutput bsonOutput, final int startPosition) { - List duplicateByteBuffers = bsonOutput.getByteBuffers(); - CompositeByteBuf outputByteBuf = new CompositeByteBuf(duplicateByteBuffers); - outputByteBuf.position(startPosition); + /** + * Create a ByteBufBsonDocument from a buffer positioned at the start of a BSON document. + * The provided buffer will be positioned at the end of the document upon normal completion of the method + */ + static ByteBufBsonDocument createOne(final ByteBuf outputByteBuf) { + int documentStart = outputByteBuf.position(); int documentSizeInBytes = outputByteBuf.getInt(); - ByteBuf slice = outputByteBuf.duplicate(); - slice.position(startPosition); - slice.limit(startPosition + documentSizeInBytes); - for (ByteBuf byteBuffer : duplicateByteBuffers) { - byteBuffer.release(); - } + int documentEnd = documentStart + documentSizeInBytes; + ByteBuf slice = outputByteBuf.duplicate().position(documentStart).limit(documentEnd); + outputByteBuf.position(documentEnd); return new ByteBufBsonDocument(slice); } @@ -138,10 +131,6 @@ T findInDocument(final Finder finder) { return finder.notFound(); } - int getSizeInBytes() { - return byteBuf.getInt(byteBuf.position()); - } - BsonDocument toBaseBsonDocument() { ByteBuf duplicateByteBuf = byteBuf.duplicate(); try (BsonBinaryReader bsonReader = new BsonBinaryReader(new ByteBufferBsonInput(duplicateByteBuf))) { diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java index 53d869a6b8f..c5cd3491ec8 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java @@ -17,6 +17,7 @@ package com.mongodb.internal.connection; import com.mongodb.MongoClientException; +import com.mongodb.MongoInternalException; import com.mongodb.MongoNamespace; import com.mongodb.ReadPreference; import com.mongodb.ServerApi; @@ -31,9 +32,12 @@ import org.bson.BsonElement; import org.bson.BsonInt64; import org.bson.BsonString; +import org.bson.ByteBuf; import org.bson.FieldNameValidator; import org.bson.io.BsonOutput; +import java.io.ByteArrayOutputStream; +import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -47,6 +51,8 @@ import static com.mongodb.connection.ServerType.SHARD_ROUTER; import static com.mongodb.connection.ServerType.STANDALONE; import static com.mongodb.internal.connection.BsonWriterHelper.writePayload; +import static 
com.mongodb.internal.connection.ByteBufBsonDocument.createList; +import static com.mongodb.internal.connection.ByteBufBsonDocument.createOne; import static com.mongodb.internal.connection.ReadConcernHelper.getReadConcernDocument; import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_TWO_WIRE_VERSION; import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION; @@ -108,30 +114,76 @@ public final class CommandMessage extends RequestMessage { this.serverApi = serverApi; } + /** + * Create a BsonDocument representing the logical document encoded by an OP_MSG. + *

      + * The returned document will contain all the fields from the Body (Kind 0) Section, as well as all fields represented by + * OP_MSG Document Sequence (Kind 1) Sections. + */ BsonDocument getCommandDocument(final ByteBufferBsonOutput bsonOutput) { - ByteBufBsonDocument byteBufBsonDocument = ByteBufBsonDocument.createOne(bsonOutput, - getEncodingMetadata().getFirstDocumentPosition()); - BsonDocument commandBsonDocument; - - if (containsPayload()) { - commandBsonDocument = byteBufBsonDocument.toBaseBsonDocument(); - - int payloadStartPosition = getEncodingMetadata().getFirstDocumentPosition() - + byteBufBsonDocument.getSizeInBytes() - + 1 // payload type - + 4 // payload size - + payload.getPayloadName().getBytes(StandardCharsets.UTF_8).length + 1; // null-terminated UTF-8 payload name - commandBsonDocument.append(payload.getPayloadName(), - new BsonArray(ByteBufBsonDocument.createList(bsonOutput, payloadStartPosition))); - } else { - commandBsonDocument = byteBufBsonDocument; + List byteBuffers = bsonOutput.getByteBuffers(); + try { + CompositeByteBuf byteBuf = new CompositeByteBuf(byteBuffers); + try { + byteBuf.position(getEncodingMetadata().getFirstDocumentPosition()); + ByteBufBsonDocument byteBufBsonDocument = createOne(byteBuf); + + // If true, it means there is at least one Kind 1:Document Sequence in the OP_MSG + if (byteBuf.hasRemaining()) { + BsonDocument commandBsonDocument = byteBufBsonDocument.toBaseBsonDocument(); + + // Each loop iteration processes one Document Sequence + // When there are no more bytes remaining, there are no more Document Sequences + while (byteBuf.hasRemaining()) { + // skip reading the payload type, we know it is 1 + byteBuf.position(byteBuf.position() + 1); + int sequenceStart = byteBuf.position(); + int sequenceSizeInBytes = byteBuf.getInt(); + int sectionEnd = sequenceStart + sequenceSizeInBytes; + + String fieldName = getSequenceIdentifier(byteBuf); + // If this assertion fires, it means that the driver has started using document sequences for nested fields. If + // so, this method will need to change in order to append the value to the correct nested document. + assertFalse(fieldName.contains(".")); + + ByteBuf documentsByteBufSlice = byteBuf.duplicate().limit(sectionEnd); + try { + commandBsonDocument.append(fieldName, new BsonArray(createList(documentsByteBufSlice))); + } finally { + documentsByteBufSlice.release(); + } + byteBuf.position(sectionEnd); + } + return commandBsonDocument; + } else { + return byteBufBsonDocument; + } + } finally { + byteBuf.release(); + } + } finally { + byteBuffers.forEach(ByteBuf::release); } - - return commandBsonDocument; } - boolean containsPayload() { - return payload != null; + /** + * Get the field name from a buffer positioned at the start of the document sequence identifier of an OP_MSG Section of type + * Document Sequence (Kind 1). + *

      + * Upon normal completion of the method, the buffer will be positioned at the start of the first BSON object in the sequence. + */ + private String getSequenceIdentifier(final ByteBuf byteBuf) { + ByteArrayOutputStream sequenceIdentifierBytes = new ByteArrayOutputStream(); + byte curByte = byteBuf.get(); + while (curByte != 0) { + sequenceIdentifierBytes.write(curByte); + curByte = byteBuf.get(); + } + try { + return sequenceIdentifierBytes.toString(StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new MongoInternalException("Unexpected exception", e); + } } boolean isResponseExpected() { From cd297a13d1868e6a22b88529016a17fda618363d Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Fri, 26 Jul 2024 17:45:51 -0700 Subject: [PATCH 51/90] Correct Scaladoc references to methods and classes that were producing warnings. (#1463) --- .../org/mongodb/scala/AggregateObservable.scala | 2 +- .../org/mongodb/scala/DistinctObservable.scala | 2 +- .../scala/org/mongodb/scala/FindObservable.scala | 2 +- .../scala/ListCollectionsObservable.scala | 2 +- .../mongodb/scala/ListDatabasesObservable.scala | 2 +- .../mongodb/scala/ListIndexesObservable.scala | 2 +- .../scala/ListSearchIndexesObservable.scala | 2 +- .../org/mongodb/scala/MapReduceObservable.scala | 2 +- .../org/mongodb/scala/gridfs/GridFSBucket.scala | 16 ++++++++-------- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala index 1a360c1a7c1..d496a4ab8a2 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala @@ -202,7 +202,7 @@ case class AggregateObservable[TResult](private val wrapped: AggregatePublisher[ /** * Sets the timeoutMode for the cursor. * - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * If the `timeout` is set then: diff --git a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala index 4a50d7767e1..252758f8f99 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala @@ -114,7 +114,7 @@ case class DistinctObservable[TResult](private val wrapped: DistinctPublisher[TR /** * Sets the timeoutMode for the cursor. * - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * @param timeoutMode the timeout mode diff --git a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala index c7cb7a158ae..57a964b8315 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala @@ -336,7 +336,7 @@ case class FindObservable[TResult](private val wrapped: FindPublisher[TResult]) /** * Sets the timeoutMode for the cursor. 
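The buffer walking introduced above follows the OP_MSG layout directly: a Document Sequence (Kind 1) section is a one-byte payload type, an int32 size that counts itself, a null-terminated UTF-8 identifier, and then back-to-back length-prefixed BSON documents until the size is exhausted; getCommandDocument then appends each decoded sequence to the Body (Kind 0) document as a BsonArray under that identifier. A minimal sketch of that walk with plain java.nio, assuming a buffer positioned at the payload-type byte (the class and method names here are invented for illustration and are not the driver's internal ByteBuf API):

```java
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class DocumentSequenceSketch {

    // Reads one Kind 1 section and returns its identifier plus the raw BSON documents it contains.
    static Map.Entry<String, List<byte[]>> readDocumentSequence(final ByteBuffer buf) {
        buf.order(ByteOrder.LITTLE_ENDIAN);                // OP_MSG and BSON integers are little-endian
        if (buf.get() != 1) {                              // payload type: 1 means Document Sequence
            throw new IllegalArgumentException("not a Kind 1 section");
        }
        int sectionStart = buf.position();
        int sectionEnd = sectionStart + buf.getInt();      // the int32 size includes its own four bytes

        String identifier = readCString(buf);              // e.g. "documents" for an insert command
        List<byte[]> documents = new ArrayList<>();
        while (buf.position() < sectionEnd) {
            int documentSize = buf.getInt(buf.position()); // peek each document's int32 length prefix
            byte[] document = new byte[documentSize];
            buf.get(document);                             // copies the whole document and advances
            documents.add(document);
        }
        return new SimpleImmutableEntry<>(identifier, documents);
    }

    private static String readCString(final ByteBuffer buf) {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        byte b;
        while ((b = buf.get()) != 0) {                     // the identifier is null-terminated UTF-8
            bytes.write(b);
        }
        return new String(bytes.toByteArray(), StandardCharsets.UTF_8);
    }
}
```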
* - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * If the `timeout` is set then: diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala index c73fbb7118e..3e34de87dfe 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala @@ -99,7 +99,7 @@ case class ListCollectionsObservable[TResult](wrapped: ListCollectionsPublisher[ /** * Sets the timeoutMode for the cursor. * - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * @param timeoutMode the timeout mode diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala index 0b5d5bf2f93..8fd7f41843c 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala @@ -128,7 +128,7 @@ case class ListDatabasesObservable[TResult](wrapped: ListDatabasesPublisher[TRes /** * Sets the timeoutMode for the cursor. * - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * @param timeoutMode the timeout mode diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala index fa8e3d1b24d..f6ab4c53c10 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala @@ -86,7 +86,7 @@ case class ListIndexesObservable[TResult](wrapped: ListIndexesPublisher[TResult] /** * Sets the timeoutMode for the cursor. * - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * @param timeoutMode the timeout mode diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala index 3987e830732..e1aee7dce1a 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala @@ -126,7 +126,7 @@ case class ListSearchIndexesObservable[TResult](wrapped: ListSearchIndexesPublis /** * Sets the timeoutMode for the cursor. 
* - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * If the `timeout` is set then: diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala index 0ccabdaea62..0ed78bb775b 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala @@ -225,7 +225,7 @@ case class MapReduceObservable[TResult](wrapped: MapReducePublisher[TResult]) ex /** * Sets the timeoutMode for the cursor. * - * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]], + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], * via [[MongoDatabase]] or via [[MongoCollection]] * * @param timeoutMode the timeout mode diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala index b828fe6074f..15849798fe3 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala @@ -183,7 +183,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param filename the filename for the stream @@ -201,7 +201,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param filename the filename for the stream @@ -224,7 +224,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param id the custom id value of the file @@ -247,7 +247,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. 
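The recurring note in these Scaladoc fixes is that timeoutMode only takes effect once an operation timeout has been configured on the client, database, or collection. As rough orientation only, a Java sketch of that wiring (the timeout, withTimeout, and timeoutMode calls are the driver's client-side operation timeout additions and should be treated as assumptions here, not as something documented by this patch):

```java
import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.cursor.TimeoutMode;
import org.bson.Document;

import java.util.concurrent.TimeUnit;

public final class TimeoutModeSketch {
    public static void main(final String[] args) {
        // A client-wide operation timeout; databases and collections may override it.
        MongoClientSettings settings = MongoClientSettings.builder()
                .applyConnectionString(new ConnectionString("mongodb://localhost"))
                .timeout(2, TimeUnit.SECONDS)
                .build();

        try (MongoClient client = MongoClients.create(settings)) {
            MongoCollection<Document> collection = client.getDatabase("test")
                    .getCollection("docs")
                    .withTimeout(500, TimeUnit.MILLISECONDS);   // per-collection override

            // With a timeout in place, timeoutMode decides whether the budget covers each
            // batch iteration or the cursor's whole lifetime.
            collection.find()
                    .timeoutMode(TimeoutMode.CURSOR_LIFETIME)
                    .forEach(doc -> { });
        }
    }
}
```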
* * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param id the custom id value of the file @@ -272,7 +272,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param clientSession the client session with which to associate this operation @@ -296,7 +296,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param clientSession the client session with which to associate this operation @@ -322,7 +322,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param clientSession the client session with which to associate this operation @@ -348,7 +348,7 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) { * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. * * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] - * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. * * @param clientSession the client session with which to associate this operation From ab72460c69e255f39083d7f68f6444550b810ca1 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Mon, 29 Jul 2024 10:40:32 +0100 Subject: [PATCH 52/90] Fix bson-kotlinx `encodeNullableSerializableValue` null handling (#1453) Ensures that the deferredElement name is reset correctly. 
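The fix turns on deferring the element name until it is known whether a value will actually be written, and on discarding that deferred name when a null is skipped so it cannot leak onto the next element. A small Java illustration of the same idea against the plain BSON writer API (this is a sketch of the pattern, not the Kotlin implementation changed here):

```java
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.BsonWriter;

public final class DeferredNameSketch {

    // Writes "name: value" only when there is something to write. When explicitNulls is
    // false and the value is null, neither the name nor a value is emitted, and the
    // deferred name is simply dropped so the next field cannot accidentally reuse it.
    static void writeNullableString(final BsonWriter writer, final String name,
                                    final String value, final boolean explicitNulls) {
        String deferredName = name;              // defer: do not write the name yet
        if (value == null && !explicitNulls) {
            return;                              // reset by discarding the deferred name
        }
        writer.writeName(deferredName);
        if (value == null) {
            writer.writeNull();
        } else {
            writer.writeString(value);
        }
    }

    public static void main(final String[] args) {
        BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
        writer.writeStartDocument();
        writeNullableString(writer, "required", "required", false);
        writeNullableString(writer, "optional", null, false);   // omitted entirely
        writer.writeEndDocument();
        System.out.println(writer.getDocument().toJson());      // {"required": "required"}
    }
}
```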
Test case ported to bson-kotlin JAVA-5524 --- .../bson/codecs/kotlin/DataClassCodecTest.kt | 16 +++++ .../bson/codecs/kotlin/samples/DataClasses.kt | 4 ++ .../org/bson/codecs/kotlinx/BsonEncoder.kt | 69 +++++++++++-------- .../kotlinx/KotlinSerializerCodecTest.kt | 23 +++++++ .../codecs/kotlinx/samples/DataClasses.kt | 5 ++ 5 files changed, 90 insertions(+), 27 deletions(-) diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt index 40abc3a9cfa..e3cfe530705 100644 --- a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt @@ -25,6 +25,7 @@ import org.bson.codecs.configuration.CodecConfigurationException import org.bson.codecs.configuration.CodecRegistries.fromProviders import org.bson.codecs.kotlin.samples.Box import org.bson.codecs.kotlin.samples.DataClassEmbedded +import org.bson.codecs.kotlin.samples.DataClassLastItemDefaultsToNull import org.bson.codecs.kotlin.samples.DataClassListOfDataClasses import org.bson.codecs.kotlin.samples.DataClassListOfListOfDataClasses import org.bson.codecs.kotlin.samples.DataClassListOfSealed @@ -51,6 +52,7 @@ import org.bson.codecs.kotlin.samples.DataClassWithEnum import org.bson.codecs.kotlin.samples.DataClassWithEnumMapKey import org.bson.codecs.kotlin.samples.DataClassWithFailingInit import org.bson.codecs.kotlin.samples.DataClassWithInvalidBsonRepresentation +import org.bson.codecs.kotlin.samples.DataClassWithListThatLastItemDefaultsToNull import org.bson.codecs.kotlin.samples.DataClassWithMutableList import org.bson.codecs.kotlin.samples.DataClassWithMutableMap import org.bson.codecs.kotlin.samples.DataClassWithMutableSet @@ -133,6 +135,20 @@ class DataClassCodecTest { assertDecodesTo(withStoredNulls, dataClass) } + @Test + fun testDataClassWithListThatLastItemDefaultsToNull() { + val expected = + """{ + | "elements": [{"required": "required"}, {"required": "required"}], + |}""" + .trimMargin() + + val dataClass = + DataClassWithListThatLastItemDefaultsToNull( + listOf(DataClassLastItemDefaultsToNull("required"), DataClassLastItemDefaultsToNull("required"))) + assertRoundTrips(expected, dataClass) + } + @Test fun testDataClassWithNullableGenericsNotNull() { val expected = diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt index 5bc6e768ed8..aa2c8983b1d 100644 --- a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt @@ -57,6 +57,10 @@ data class DataClassWithDefaults( data class DataClassWithNulls(val boolean: Boolean?, val string: String?, val listSimple: List?) +data class DataClassWithListThatLastItemDefaultsToNull(val elements: List) + +data class DataClassLastItemDefaultsToNull(val required: String, val optional: String? = null) + data class DataClassSelfReferential( val name: String, val left: DataClassSelfReferential? 
= null, diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt index b3ae0c8cdf4..75080254cdb 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt @@ -72,7 +72,7 @@ internal class DefaultBsonEncoder( private var isPolymorphic = false private var state = STATE.VALUE private var mapState = MapState() - private var deferredElementName: String? = null + private val deferredElementHandler: DeferredElementHandler = DeferredElementHandler() override fun shouldEncodeElementDefault(descriptor: SerialDescriptor, index: Int): Boolean = configuration.encodeDefaults @@ -117,7 +117,7 @@ internal class DefaultBsonEncoder( is StructureKind.CLASS -> { val elementName = descriptor.getElementName(index) if (descriptor.getElementDescriptor(index).isNullable) { - deferredElementName = elementName + deferredElementHandler.set(elementName) } else { encodeName(elementName) } @@ -140,25 +140,27 @@ internal class DefaultBsonEncoder( } override fun encodeSerializableValue(serializer: SerializationStrategy, value: T) { - deferredElementName?.let { - if (value != null || configuration.explicitNulls) { - encodeName(it) - super.encodeSerializableValue(serializer, value) - } else { - deferredElementName = null - } - } - ?: super.encodeSerializableValue(serializer, value) + deferredElementHandler.with( + { + // When using generics its possible for `value` to be null + // See: https://youtrack.jetbrains.com/issue/KT-66206 + if (value != null || configuration.explicitNulls) { + encodeName(it) + super.encodeSerializableValue(serializer, value) + } + }, + { super.encodeSerializableValue(serializer, value) }) } override fun encodeNullableSerializableValue(serializer: SerializationStrategy, value: T?) { - deferredElementName?.let { - if (value != null || configuration.explicitNulls) { - encodeName(it) - super.encodeNullableSerializableValue(serializer, value) - } - } - ?: super.encodeNullableSerializableValue(serializer, value) + deferredElementHandler.with( + { + if (value != null || configuration.explicitNulls) { + encodeName(it) + super.encodeNullableSerializableValue(serializer, value) + } + }, + { super.encodeNullableSerializableValue(serializer, value) }) } override fun encodeByte(value: Byte) = encodeInt(value.toInt()) @@ -170,14 +172,7 @@ internal class DefaultBsonEncoder( override fun encodeDouble(value: Double) = writer.writeDouble(value) override fun encodeInt(value: Int) = writer.writeInt32(value) override fun encodeLong(value: Long) = writer.writeInt64(value) - override fun encodeNull() { - deferredElementName?.let { - if (configuration.explicitNulls) { - encodeName(it) - } - } - writer.writeNull() - } + override fun encodeNull() = writer.writeNull() override fun encodeString(value: String) { when (state) { @@ -206,7 +201,6 @@ internal class DefaultBsonEncoder( private fun encodeName(value: Any) { writer.writeName(value.toString()) - deferredElementName = null state = STATE.VALUE } @@ -229,4 +223,25 @@ internal class DefaultBsonEncoder( return getState() } } + + private class DeferredElementHandler { + private var deferredElementName: String? 
= null + + fun set(name: String) { + assert(deferredElementName == null) { -> "Overwriting an existing deferred name" } + deferredElementName = name + } + + fun with(actionWithDeferredElement: (String) -> Unit, actionWithoutDeferredElement: () -> Unit): Unit { + deferredElementName?.let { + reset() + actionWithDeferredElement(it) + } + ?: actionWithoutDeferredElement() + } + + private fun reset() { + deferredElementName = null + } + } } diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt index ed9e1bfb43a..05a0d3ffd7d 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt @@ -45,6 +45,7 @@ import org.bson.codecs.kotlinx.samples.DataClassContainsOpen import org.bson.codecs.kotlinx.samples.DataClassContainsValueClass import org.bson.codecs.kotlinx.samples.DataClassEmbedded import org.bson.codecs.kotlinx.samples.DataClassKey +import org.bson.codecs.kotlinx.samples.DataClassLastItemDefaultsToNull import org.bson.codecs.kotlinx.samples.DataClassListOfDataClasses import org.bson.codecs.kotlinx.samples.DataClassListOfListOfDataClasses import org.bson.codecs.kotlinx.samples.DataClassListOfSealed @@ -78,6 +79,7 @@ import org.bson.codecs.kotlinx.samples.DataClassWithEncodeDefault import org.bson.codecs.kotlinx.samples.DataClassWithEnum import org.bson.codecs.kotlinx.samples.DataClassWithEnumMapKey import org.bson.codecs.kotlinx.samples.DataClassWithFailingInit +import org.bson.codecs.kotlinx.samples.DataClassWithListThatLastItemDefaultsToNull import org.bson.codecs.kotlinx.samples.DataClassWithMutableList import org.bson.codecs.kotlinx.samples.DataClassWithMutableMap import org.bson.codecs.kotlinx.samples.DataClassWithMutableSet @@ -255,6 +257,27 @@ class KotlinSerializerCodecTest { assertRoundTrips(expectedNulls, dataClass, altConfiguration) } + @Test + fun testDataClassWithListThatLastItemDefaultsToNull() { + val expectedWithOutNulls = + """{ + | "elements": [{"required": "required"}, {"required": "required"}], + |}""" + .trimMargin() + + val dataClass = + DataClassWithListThatLastItemDefaultsToNull( + listOf(DataClassLastItemDefaultsToNull("required"), DataClassLastItemDefaultsToNull("required"))) + assertRoundTrips(expectedWithOutNulls, dataClass) + + val expectedWithNulls = + """{ + | "elements": [{"required": "required", "optional": null}, {"required": "required", "optional": null}], + |}""" + .trimMargin() + assertRoundTrips(expectedWithNulls, dataClass, BsonConfiguration(explicitNulls = true)) + } + @Test fun testDataClassWithNullableGenericsNotNull() { val expected = diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt index 2511c7b0418..66907bff103 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt @@ -82,6 +82,11 @@ data class DataClassWithDefaults( @Serializable data class DataClassWithNulls(val boolean: Boolean?, val string: String?, val listSimple: List?) +@Serializable +data class DataClassWithListThatLastItemDefaultsToNull(val elements: List) + +@Serializable data class DataClassLastItemDefaultsToNull(val required: String, val optional: String? 
= null) + @Serializable data class DataClassSelfReferential( val name: String, From f3a1ef093a5ba75730c8e972dcabc8d3beeb1879 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Mon, 29 Jul 2024 09:21:39 -0700 Subject: [PATCH 53/90] Add Queryable Encryption V2 support (#1445) - Remove references to "rangePreview" in Javadoc. - Add tests for legacy specifications. - Update prose test to incorporate "Range" algorithm. JAVA-5321 --- build.gradle | 2 +- .../client/model/vault/EncryptOptions.java | 24 +- .../client/model/vault/RangeOptions.java | 26 +- .../client/vault/EncryptOptionsHelper.java | 4 + .../encryptedFields-Range-Date.json | 33 - .../encryptedFields-Range-Decimal.json | 23 - ...ncryptedFields-Range-DecimalPrecision.json | 32 - .../encryptedFields-Range-Double.json | 23 - ...encryptedFields-Range-DoublePrecision.json | 32 - .../encryptedFields-Range-Int.json | 29 - .../encryptedFields-Range-Long.json | 29 - .../range-encryptedFields-Date.json | 54 +- ...ge-encryptedFields-DecimalNoPrecision.json | 39 +- ...ange-encryptedFields-DecimalPrecision.json | 12 +- ...nge-encryptedFields-DoubleNoPrecision.json | 39 +- ...range-encryptedFields-DoublePrecision.json | 57 +- .../range-encryptedFields-Int.json | 51 +- .../range-encryptedFields-Long.json | 51 +- .../fle2v2-Range-Double-InsertFind.json | 1124 ----------- ...le2v2-Range-DoublePrecision-Aggregate.json | 581 ------ ...son => fle2v2-Rangev2-Date-Aggregate.json} | 37 +- ...n => fle2v2-Rangev2-Date-Correctness.json} | 10 +- ...e.json => fle2v2-Rangev2-Date-Delete.json} | 31 +- ...fle2v2-Rangev2-Date-FindOneAndUpdate.json} | 37 +- ...on => fle2v2-Rangev2-Date-InsertFind.json} | 37 +- ...e.json => fle2v2-Rangev2-Date-Update.json} | 37 +- ... => fle2v2-Rangev2-Decimal-Aggregate.json} | 37 +- ...> fle2v2-Rangev2-Decimal-Correctness.json} | 10 +- ...son => fle2v2-Rangev2-Decimal-Delete.json} | 31 +- ...2v2-Rangev2-Decimal-FindOneAndUpdate.json} | 37 +- ...=> fle2v2-Rangev2-Decimal-InsertFind.json} | 37 +- ...son => fle2v2-Rangev2-Decimal-Update.json} | 37 +- ...2-Rangev2-DecimalPrecision-Aggregate.json} | 37 +- ...Rangev2-DecimalPrecision-Correctness.json} | 10 +- ...e2v2-Rangev2-DecimalPrecision-Delete.json} | 31 +- ...v2-DecimalPrecision-FindOneAndUpdate.json} | 37 +- ...-Rangev2-DecimalPrecision-InsertFind.json} | 37 +- ...e2v2-Rangev2-DecimalPrecision-Update.json} | 37 +- ...n => fle2v2-Rangev2-Double-Aggregate.json} | 37 +- ...=> fle2v2-Rangev2-Double-Correctness.json} | 10 +- ...json => fle2v2-Rangev2-Double-Delete.json} | 31 +- ...e2v2-Rangev2-Double-FindOneAndUpdate.json} | 37 +- ... => fle2v2-Rangev2-Double-InsertFind.json} | 37 +- .../legacy/fle2v2-Rangev2-Double-Update.json | 1140 ++++++++++++ ...v2-Rangev2-DoublePrecision-Aggregate.json} | 10 +- ...2-Rangev2-DoublePrecision-Correctness.json | 1650 +++++++++++++++++ ...le2v2-Rangev2-DoublePrecision-Delete.json} | 31 +- ...ev2-DoublePrecision-FindOneAndUpdate.json} | 37 +- ...2-Rangev2-DoublePrecision-InsertFind.json} | 37 +- ...le2v2-Rangev2-DoublePrecision-Update.json} | 37 +- ...json => fle2v2-Rangev2-Int-Aggregate.json} | 37 +- ...on => fle2v2-Rangev2-Int-Correctness.json} | 10 +- ...te.json => fle2v2-Rangev2-Int-Delete.json} | 31 +- ... 
fle2v2-Rangev2-Int-FindOneAndUpdate.json} | 37 +- ...son => fle2v2-Rangev2-Int-InsertFind.json} | 37 +- ...te.json => fle2v2-Rangev2-Int-Update.json} | 37 +- ...son => fle2v2-Rangev2-Long-Aggregate.json} | 37 +- ...n => fle2v2-Rangev2-Long-Correctness.json} | 10 +- ...e.json => fle2v2-Rangev2-Long-Delete.json} | 31 +- ...fle2v2-Rangev2-Long-FindOneAndUpdate.json} | 37 +- ...on => fle2v2-Rangev2-Long-InsertFind.json} | 37 +- ...e.json => fle2v2-Rangev2-Long-Update.json} | 37 +- ...ype.json => fle2v2-Rangev2-WrongType.json} | 9 +- .../client/vault/ClientEncryption.java | 6 +- .../mongodb/scala/model/vault/package.scala | 4 +- .../scala/vault/ClientEncryption.scala | 4 +- .../client/vault/ClientEncryption.java | 6 +- ...EncryptionRangeExplicitEncryptionTest.java | 16 +- 68 files changed, 3662 insertions(+), 2687 deletions(-) delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Date.json delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Decimal.json delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DecimalPrecision.json delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Double.json delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DoublePrecision.json delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Int.json delete mode 100644 driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Long.json delete mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json delete mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Date-Aggregate.json => fle2v2-Rangev2-Date-Aggregate.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Date-Correctness.json => fle2v2-Rangev2-Date-Correctness.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Date-Delete.json => fle2v2-Rangev2-Date-Delete.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Date-FindOneAndUpdate.json => fle2v2-Rangev2-Date-FindOneAndUpdate.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Date-InsertFind.json => fle2v2-Rangev2-Date-InsertFind.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Date-Update.json => fle2v2-Rangev2-Date-Update.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Decimal-Aggregate.json => fle2v2-Rangev2-Decimal-Aggregate.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Decimal-Correctness.json => fle2v2-Rangev2-Decimal-Correctness.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Decimal-Delete.json => fle2v2-Rangev2-Decimal-Delete.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Decimal-FindOneAndUpdate.json => fle2v2-Rangev2-Decimal-FindOneAndUpdate.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Decimal-InsertFind.json => fle2v2-Rangev2-Decimal-InsertFind.json} (99%) rename 
driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Decimal-Update.json => fle2v2-Rangev2-Decimal-Update.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DecimalPrecision-Aggregate.json => fle2v2-Rangev2-DecimalPrecision-Aggregate.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DecimalPrecision-Correctness.json => fle2v2-Rangev2-DecimalPrecision-Correctness.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DecimalPrecision-Delete.json => fle2v2-Rangev2-DecimalPrecision-Delete.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json => fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DecimalPrecision-InsertFind.json => fle2v2-Rangev2-DecimalPrecision-InsertFind.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DecimalPrecision-Update.json => fle2v2-Rangev2-DecimalPrecision-Update.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Double-Aggregate.json => fle2v2-Rangev2-Double-Aggregate.json} (98%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Double-Correctness.json => fle2v2-Rangev2-Double-Correctness.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Double-Delete.json => fle2v2-Rangev2-Double-Delete.json} (98%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Double-FindOneAndUpdate.json => fle2v2-Rangev2-Double-FindOneAndUpdate.json} (98%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Double-Update.json => fle2v2-Rangev2-Double-InsertFind.json} (98%) create mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DoublePrecision-Correctness.json => fle2v2-Rangev2-DoublePrecision-Aggregate.json} (99%) create mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DoublePrecision-Delete.json => fle2v2-Rangev2-DoublePrecision-Delete.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DoublePrecision-FindOneAndUpdate.json => fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DoublePrecision-InsertFind.json => fle2v2-Rangev2-DoublePrecision-InsertFind.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-DoublePrecision-Update.json => fle2v2-Rangev2-DoublePrecision-Update.json} (96%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Int-Aggregate.json => fle2v2-Rangev2-Int-Aggregate.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Int-Correctness.json => fle2v2-Rangev2-Int-Correctness.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Int-Delete.json => fle2v2-Rangev2-Int-Delete.json} (95%) rename 
driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Int-FindOneAndUpdate.json => fle2v2-Rangev2-Int-FindOneAndUpdate.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Int-InsertFind.json => fle2v2-Rangev2-Int-InsertFind.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Int-Update.json => fle2v2-Rangev2-Int-Update.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Long-Aggregate.json => fle2v2-Rangev2-Long-Aggregate.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Long-Correctness.json => fle2v2-Rangev2-Long-Correctness.json} (99%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Long-Delete.json => fle2v2-Rangev2-Long-Delete.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Long-FindOneAndUpdate.json => fle2v2-Rangev2-Long-FindOneAndUpdate.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Long-InsertFind.json => fle2v2-Rangev2-Long-InsertFind.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-Long-Update.json => fle2v2-Rangev2-Long-Update.json} (95%) rename driver-core/src/test/resources/client-side-encryption/legacy/{fle2v2-Range-WrongType.json => fle2v2-Rangev2-WrongType.json} (95%) diff --git a/build.gradle b/build.gradle index 50623ee32bf..ccd32c0bc9e 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ ext { zstdVersion = '1.5.5-3' awsSdkV2Version = '2.18.9' awsSdkV1Version = '1.12.337' - mongoCryptVersion = '1.10.0-SNAPSHOT' + mongoCryptVersion = '1.11.0-SNAPSHOT' projectReactorVersion = '2022.0.0' junitBomVersion = '5.10.2' logbackVersion = '1.3.14' diff --git a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java index 509e467273b..868470ee1fc 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java @@ -52,8 +52,9 @@ public EncryptOptions(final String algorithm) { *

      *     <li>AEAD_AES_256_CBC_HMAC_SHA_512-Random</li>
      *     <li>Indexed</li>
      *     <li>Unindexed</li>
-     *     <li>RangePreview</li>
+     *     <li>Range</li>
      * </ul>
+     * Note: The Range algorithm is unstable. It is subject to breaking changes.
      *
      * @return the encryption algorithm
      */
@@ -116,8 +117,8 @@ public EncryptOptions keyAltName(final String keyAltName) {
     /**
      * The contention factor.
      *
-     * <p>It is an error to set contentionFactor when algorithm is not "Indexed" or "RangePreview".
-     * <p>Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes.</p>
+     * <p>It is an error to set contentionFactor when algorithm is not "Indexed" or "Range".
+     * <p>Note: The Range algorithm is unstable. It is subject to breaking changes.</p>
      * @param contentionFactor the contention factor, which must be {@code >= 0} or null.
      * @return this
      * @since 4.7
@@ -144,9 +145,9 @@ public Long getContentionFactor() {
     /**
      * The QueryType.
      *
-     * <p>Currently, we support only "equality" or "RangePreview" queryType.</p>
-     * <p>It is an error to set queryType when the algorithm is not "Indexed" or "RangePreview".</p>
-     * <p>Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes.</p>
+     * <p>Currently, we support only "equality" or "range" queryType.</p>
+     * <p>It is an error to set queryType when the algorithm is not "Indexed" or "Range".</p>
+     * <p>Note: The Range algorithm is unstable. It is subject to breaking changes.</p>
      * @param queryType the query type
      * @return this
      * @since 4.7
@@ -160,7 +161,8 @@ public EncryptOptions queryType(@Nullable final String queryType) {
     /**
      * Gets the QueryType.
      *
-     * <p>Currently, we support only "equality" or "RangePreview" queryType.</p>
+     * <p>Currently, we support only "equality" or "range" queryType.</p>
+     * <p>Note: The Range algorithm is unstable. It is subject to breaking changes.
      * @see #queryType(String)
      * @return the queryType or null
      * @since 4.7
@@ -174,12 +176,12 @@ public String getQueryType() {
     /**
      * The RangeOptions
      *
-     * <p>It is an error to set RangeOptions when the algorithm is not "RangePreview".
-     * <p>Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes.
+     * <p>It is an error to set RangeOptions when the algorithm is not "Range".
+     * <p>Note: The Range algorithm is unstable. It is subject to breaking changes.
      * @param rangeOptions the range options
      * @return this
      * @since 4.9
-     * @mongodb.server.release 6.2
+     * @mongodb.server.release 8.0
      * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
      */
     @Beta(Reason.SERVER)
@@ -192,7 +194,7 @@ public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) {
      * Gets the RangeOptions
      * @return the range options or null if not set
      * @since 4.9
-     * @mongodb.server.release 6.2
+     * @mongodb.server.release 8.0
      * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
      */
     @Nullable
diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java
index 42a6618bcdb..fcdc70281bb 100644
--- a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java
@@ -22,14 +22,14 @@ import org.bson.BsonValue;
 /**
- * Range options specifies index options for a Queryable Encryption field supporting "rangePreview" queries.
+ * Range options specifies index options for a Queryable Encryption field supporting "range" queries.
 *
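These Javadoc and version changes track the rename of the algorithm to "Range" and the bump of the supporting server release; the trimFactor setter added to RangeOptions further below is serialized alongside min, max, sparsity, and precision. A hedged sketch of how an application might drive explicit range encryption with these options (clientEncryption and dataKeyId are placeholders; the setter names come from this patch, while the surrounding usage is assumed):

```java
import com.mongodb.client.model.vault.EncryptOptions;
import com.mongodb.client.model.vault.RangeOptions;
import com.mongodb.client.vault.ClientEncryption;
import org.bson.BsonBinary;
import org.bson.BsonInt32;

public final class RangeEncryptionSketch {

    // clientEncryption and dataKeyId are assumed to exist already (placeholders).
    static BsonBinary encryptAge(final ClientEncryption clientEncryption, final BsonBinary dataKeyId) {
        RangeOptions rangeOptions = new RangeOptions()
                .min(new BsonInt32(0))          // must match the collection's encryptedFields
                .max(new BsonInt32(200))
                .sparsity(1L)
                .setTrimFactor(1);              // new in this patch

        EncryptOptions encryptOptions = new EncryptOptions("Range")   // previously "RangePreview"
                .keyId(dataKeyId)
                .contentionFactor(0L)
                .rangeOptions(rangeOptions);

        return clientEncryption.encrypt(new BsonInt32(42), encryptOptions);
    }
}
```

For range queries the same options can be combined with queryType("range") and encryptExpression; EncryptOptionsHelper, changed later in this patch, now includes trimFactor in the range options document it builds.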

 * <p>{@code min}, {@code max}, {@code sparsity}, and {@code precision} must match the values set in the {@code encryptedFields}
 * of the destination collection.
 *
 * <p>For {@code double} and {@code decimal128}, {@code min}/{@code max}/{@code precision} must all be set, or all be unset.
 *
- * <p>Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes.
+ * <p>
      Note: The "Range" algorithm is unstable. It is subject to breaking changes. * @since 4.9 * @mongodb.server.release 6.2 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption @@ -39,6 +39,7 @@ public class RangeOptions { private BsonValue min; private BsonValue max; + private Integer trimFactor; private Long sparsity; private Integer precision; @@ -76,6 +77,26 @@ public RangeOptions max(@Nullable final BsonValue max) { return this; } + /** + * @return the trim factor value if set + * @since 5.2 + */ + public Integer getTrimFactor() { + return trimFactor; + } + + /** + * Set the number of top-level edges stored per record by setting a trim factor, reducing write conflicts during simultaneous inserts + * and optimizing queries by excluding seldom-used high-level edges. + * @param trimFactor the trim factor + * @return this + * @since 5.2 + */ + public RangeOptions setTrimFactor(final Integer trimFactor) { + this.trimFactor = trimFactor; + return this; + } + /** * @return the maximum value if set */ @@ -125,6 +146,7 @@ public String toString() { return "RangeOptions{" + "min=" + min + ", max=" + max + + ", trimFactor=" + trimFactor + ", sparsity=" + sparsity + ", precision=" + precision + '}'; diff --git a/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java b/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java index 36e9053b231..edd0a4d958f 100644 --- a/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java +++ b/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java @@ -60,6 +60,10 @@ public static MongoExplicitEncryptOptions asMongoExplicitEncryptOptions(final En if (sparsity != null) { rangeOptionsBsonDocument.put("sparsity", new BsonInt64(sparsity)); } + Integer trimFactor = rangeOptions.getTrimFactor(); + if (trimFactor != null) { + rangeOptionsBsonDocument.put("trimFactor", new BsonInt32(trimFactor)); + } Integer precision = rangeOptions.getPrecision(); if (precision != null) { rangeOptionsBsonDocument.put("precision", new BsonInt32(precision)); diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Date.json b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Date.json deleted file mode 100644 index b0299be2a33..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Date.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDate", - "bsonType": "date", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$date": { - "$numberLong": "0" - } - }, - "max": { - "$date": { - "$numberLong": "200" - } - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Decimal.json b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Decimal.json deleted file mode 100644 index 8bd79a15f86..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Decimal.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDecimal", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", 
- "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DecimalPrecision.json b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DecimalPrecision.json deleted file mode 100644 index d52974ef512..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DecimalPrecision.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDecimalPrecision", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDecimal": "0.0" - }, - "max": { - "$numberDecimal": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Double.json b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Double.json deleted file mode 100644 index 5fbfaa8bdbf..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Double.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDouble", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DoublePrecision.json b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DoublePrecision.json deleted file mode 100644 index 18b40d00974..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-DoublePrecision.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Int.json b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Int.json deleted file mode 100644 index 819d0b98961..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Int.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedInt", - "bsonType": "int", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberInt": "0" - }, - "max": { - "$numberInt": "200" - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Long.json 
b/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Long.json deleted file mode 100644 index c500b85b534..00000000000 --- a/driver-core/src/test/resources/client-side-encryption-data/encryptedFields-Range-Long.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedLong", - "bsonType": "long", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberLong": "0" - }, - "max": { - "$numberLong": "200" - } - } - } - ] -} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Date.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Date.json index e19fc1e1826..defa6e37ff1 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Date.json +++ b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Date.json @@ -1,30 +1,36 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDate", - "bsonType": "date", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$date": { - "$numberLong": "0" - } - }, - "max": { - "$date": { - "$numberLong": "200" + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" } + }, + "max": { + "$date": { + "$numberLong": "200" } } } - ] -} + } + ] +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalNoPrecision.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalNoPrecision.json index c6d129d4ca1..dbe28e9c105 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalNoPrecision.json +++ b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalNoPrecision.json @@ -1,21 +1,26 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" }, - "path": "encryptedDecimalNoPrecision", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberInt": "1" - } + "sparsity": { + "$numberLong": "1" } } - ] - } - \ No newline at end of file + } + ] +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalPrecision.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalPrecision.json index c23c3fa923c..538ab20f0ed 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalPrecision.json +++ 
b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DecimalPrecision.json @@ -10,10 +10,16 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", - "sparsity": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { "$numberInt": "1" }, + "sparsity": { + "$numberLong": "1" + }, "min": { "$numberDecimal": "0.0" }, @@ -26,4 +32,4 @@ } } ] -} +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoubleNoPrecision.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoubleNoPrecision.json index 4af6422714b..fb4f46d3753 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoubleNoPrecision.json +++ b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoubleNoPrecision.json @@ -1,21 +1,26 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - } + "sparsity": { + "$numberLong": "1" } } - ] - } - \ No newline at end of file + } + ] +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoublePrecision.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoublePrecision.json index c1f388219db..07d1c84d6f0 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoublePrecision.json +++ b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-DoublePrecision.json @@ -1,30 +1,35 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } + "precision": { + "$numberInt": "2" } } - ] - } - \ No newline at end of file + } + ] +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Int.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Int.json index 217bf6743c8..4f0b4854e42 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Int.json +++ b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Int.json @@ -1,27 +1,32 @@ { - "fields": [ - { - 
"keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" }, - "path": "encryptedInt", - "bsonType": "int", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberInt": "0" - }, - "max": { - "$numberInt": "200" - } + "max": { + "$numberInt": "200" } } - ] - } - \ No newline at end of file + } + ] +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Long.json b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Long.json index 0fb87edaeff..32fe1ea15db 100644 --- a/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Long.json +++ b/driver-core/src/test/resources/client-side-encryption-data/range-encryptedFields-Long.json @@ -1,27 +1,32 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" }, - "path": "encryptedLong", - "bsonType": "long", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberLong": "0" - }, - "max": { - "$numberLong": "200" - } + "max": { + "$numberLong": "200" } } - ] - } - \ No newline at end of file + } + ] +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json deleted file mode 100644 index d3dc2f830c0..00000000000 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json +++ /dev/null @@ -1,1124 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "7.0.0", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "maxServerVersion": "7.99.99" - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": 
"1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "FLE2 Range Double. Insert and Find.", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoubleNoPrecision": { - "$numberDouble": "0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoubleNoPrecision": { - "$numberDouble": "1" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoubleNoPrecision": { - "$gt": { - "$numberDouble": "0" - } - } - } - }, - "result": [ - { - "_id": 1, - "encryptedDoubleNoPrecision": { - "$numberDouble": "1" - } - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 0, - "encryptedDoubleNoPrecision": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedDoubleNoPrecision": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "encryptedDoubleNoPrecision": { - "$gt": { - "$binary": { - "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", - "subType": "06" - } - } - } - }, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] - } - } - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "encryptedDoubleNoPrecision": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", - 
"subType": "00" - } - }, - { - "$binary": { - "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", - "subType": "00" - } - } - ] - }, - { - "_id": 1, - "encryptedDoubleNoPrecision": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", - "subType": "00" - } - }, - { - "$binary": { - 
"base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", - "subType": "00" - } - }, - { - "$binary": { - "base64": 
"+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json deleted file mode 100644 index 4188685a2c0..00000000000 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json +++ /dev/null @@ -1,581 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "7.0.0", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "maxServerVersion": "7.99.99" - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": 
"EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "FLE2 Range DoublePrecision. Aggregate.", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "0" - } - } - } - } - ] - }, - "result": [ - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1" - } - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedDoublePrecision": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": 
"0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "default", - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$gt": { - "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAA
AAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", - "subType": "06" - } - } - } - } - } - ], - "cursor": {}, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] - } - } - } - }, - "command_name": "aggregate" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", - "subType": "00" - } - } - ] - }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", - "subType": "00" - } - }, - { - "$binary": { - 
"base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", - "subType": "00" - } - }, - { - "$binary": { - "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json index 9eaabe0d71a..63a2db3ef13 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -226,10 +228,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -283,10 +288,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -346,10 +354,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -383,12 +394,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -445,12 +450,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Correctness.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Correctness.json index fa887e08928..fae25a1c028 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json index cce4faf1887..63a2b29fccc 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -215,10 +217,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -272,10 +277,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -336,10 +344,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -373,12 +384,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json similarity index 95% rename from 
driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json index 4392b676860..049186c8695 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -230,10 +232,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -287,10 +292,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -352,10 +360,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -389,12 +400,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -451,12 +456,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json index 27ce7881df1..d0751434b5f 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -222,10 +224,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" 
+ }, "sparsity": { "$numberLong": "1" }, @@ -279,10 +284,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -337,10 +345,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -374,12 +385,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -436,12 +441,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json index f7d5a6af665..1e7750feebd 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Date-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -226,10 +228,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -283,10 +288,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -354,10 +362,13 @@ "path": "encryptedDate", "bsonType": "date", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -391,12 +402,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -453,12 +458,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json 
b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json index 401ee34e3f2..5f573a933db 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -206,10 +208,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -253,10 +258,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -306,10 +314,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -335,12 +346,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1119,12 +1124,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Correctness.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Correctness.json index 758d3e57325..4316a31c3e3 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Correctness.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json index 24a08f318ce..a94dd40feed 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -197,10 +199,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -244,10 +249,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -298,10 +306,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -327,12 +338,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json index 2a8070ecf9d..5226facfb64 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -208,10 +210,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { 
"$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -255,10 +260,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -310,10 +318,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -339,12 +350,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1123,12 +1128,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json index 2ef63f42b99..b6615454bd6 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -202,10 +204,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -249,10 +254,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -297,10 +305,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -326,12 +337,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1110,12 +1115,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": 
"RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json index 8064eb1b189..ceef8ca9ba2 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json @@ -1,11 +1,10 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -23,10 +22,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -206,10 +208,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -253,10 +258,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -314,10 +322,13 @@ "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -343,12 +354,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", @@ -1127,12 +1132,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json index 8cf143c0945..35cc4aba874 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - 
"queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -335,10 +343,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -373,12 +384,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -479,12 +484,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json index a4b06998f7e..89544458874 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json index fad8234838a..e000c405897 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ 
"replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -208,10 +210,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -264,10 +269,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -327,10 +335,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -365,12 +376,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json index fb8f4f4140d..27f10a30a79 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -219,10 +221,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -275,10 +280,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -339,10 +347,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -377,12 +388,6 @@ "$$type": "binData" }, 
"__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -483,12 +488,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json index 79562802e6f..5fb96730d6c 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -213,10 +215,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -362,12 +373,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -466,12 +471,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json index cc93b76948c..f67ae3ca237 100644 --- 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -343,10 +351,13 @@ "path": "encryptedDecimalPrecision", "bsonType": "decimal", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -379,12 +390,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -483,12 +488,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json similarity index 98% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json index 79f26660f24..e14ca8ff0ca 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -208,10 +210,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -255,10 +260,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": 
"rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -308,10 +316,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -335,12 +346,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -733,12 +738,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Correctness.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Correctness.json index 117e56af620..edb336743c9 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json similarity index 98% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json index 40d8ed5bb2e..6821c97939b 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -199,10 +201,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -246,10 +251,13 @@ 
"path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -300,10 +308,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -327,12 +338,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json similarity index 98% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json index f0893ce6612..298a4506ccf 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -210,10 +212,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -257,10 +262,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -312,10 +320,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -339,12 +350,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -737,12 +742,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json similarity index 98% rename from 
driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json index 9d6a1fbfdd1..dabe8a0930d 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Double-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -208,10 +210,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -255,10 +260,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -316,10 +324,13 @@ "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" } @@ -343,12 +354,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", @@ -741,12 +746,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json new file mode 100644 index 00000000000..dabe8a0930d --- /dev/null +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json @@ -0,0 +1,1140 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": 
"insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKF
b6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+
DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVT
F6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json index 60f1ea7a333..87d0e3dd8c1 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": 
"range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json new file mode 100644 index 00000000000..87d0e3dd8c1 --- /dev/null +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + 
"$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 
1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, 
+ "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": 
{ + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + 
"autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 
0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json index 4ed591d3f88..a9315dec960 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -208,10 +210,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -264,10 +269,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -327,10 +335,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -363,12 +374,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json index d8fbbfae73b..28bebe0dbb0 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -219,10 +221,13 @@ "path": 
"encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -275,10 +280,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -339,10 +347,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -375,12 +386,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -479,12 +484,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json index 4213b066d1c..3b3176be6f7 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -213,10 +215,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -362,12 +373,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -466,12 +471,6 @@ 
"$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json similarity index 96% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json index 89eb4c338d7..be2d0e9f4af 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -217,10 +219,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -273,10 +278,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -343,10 +351,13 @@ "path": "encryptedDoublePrecision", "bsonType": "double", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -379,12 +390,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", @@ -483,12 +488,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json index 686f0241bae..c689dede185 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - 
"maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -359,12 +370,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -421,12 +426,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Correctness.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Correctness.json index 2964624f22b..9dc4e4e5011 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json index 531b3e7590c..4a6b34a1dc9 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] 
} ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -205,10 +207,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -258,10 +263,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -318,10 +326,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -351,12 +362,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json index 402086cdb6b..2bf905fa652 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -216,10 +218,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -330,10 +338,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -363,12 +374,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -425,12 +430,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": 
"DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json index 965b8a55163..a5eb4d60ec6 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -210,10 +212,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -263,10 +268,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -317,10 +325,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -350,12 +361,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -412,12 +417,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json index 6cf44ac782d..e826ea2acf0 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Int-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", 
"contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -334,10 +342,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -367,12 +378,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -429,12 +434,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json index 6edb38a800c..d5020f5927f 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -326,10 +334,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -359,12 +370,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -421,12 +426,6 @@ "$$type": "binData" }, 
"__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Correctness.json similarity index 99% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Correctness.json index 3d33f7381bb..d81e0933f80 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Correctness.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json index 1b327820108..3720d00341f 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -205,10 +207,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -258,10 +263,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -318,10 +326,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -351,12 +362,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json index b8e3b888a8e..5e4b5ae0dea 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -216,10 +218,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -269,10 +274,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -330,10 +338,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -363,12 +374,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -425,12 +430,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json index d637fcf9e73..0d485806267 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, 
"sparsity": { "$numberLong": "1" }, @@ -210,10 +212,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -263,10 +268,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -317,10 +325,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -350,12 +361,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -412,12 +417,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", - "subType": "00" - } - }, { "$binary": { "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Update.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json index 1b76019a4cf..2d3321fd80b 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-Long-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json @@ -1,13 +1,12 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" - ], - "maxServerVersion": "7.99.99" + ] } ], "database_name": "default", @@ -25,10 +24,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -214,10 +216,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -267,10 +272,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -334,10 +342,13 @@ "path": "encryptedLong", "bsonType": "long", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberInt": "1" + }, "sparsity": { "$numberLong": "1" }, @@ -367,12 +378,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", - "subType": "00" - } - }, { "$binary": { "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", @@ -429,12 +434,6 @@ "$$type": "binData" }, "__safeContent__": [ - { - "$binary": { - "base64": 
"DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", - "subType": "00" - } - }, { "$binary": { "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-WrongType.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-WrongType.json similarity index 95% rename from driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-WrongType.json rename to driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-WrongType.json index 704a693b8fd..62156045085 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Range-WrongType.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-WrongType.json @@ -1,13 +1,13 @@ { "runOn": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "8.0.0", "topology": [ "replicaset", "sharded", "load-balanced" ], - "maxServerVersion": "7.99.99" + "maxServerVersion": "8.99.99" } ], "database_name": "default", @@ -25,10 +25,13 @@ "path": "encryptedInt", "bsonType": "int", "queries": { - "queryType": "rangePreview", + "queryType": "range", "contention": { "$numberLong": "0" }, + "trimFactor": { + "$numberLong": "1" + }, "sparsity": { "$numberLong": "1" }, diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java index 37d0236293b..02110096d08 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java @@ -98,14 +98,14 @@ public interface ClientEncryption extends Closeable { * * {@code $gt} may also be {@code $gte}. {@code $lt} may also be {@code $lte}. * - *

      Only supported when queryType is "rangePreview" and algorithm is "RangePreview". - *

      Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. + *

      Only supported when queryType is "range" and algorithm is "Range". + *

      Note: The Range algorithm is unstable. It is subject to breaking changes. * * @param expression the Match Expression or Aggregate Expression * @param options the options * @return a Publisher containing the queryable encrypted range expression * @since 4.9 - * @mongodb.server.release 6.2 + * @mongodb.server.release 8.0 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption * @mongodb.driver.manual reference/operator/aggregation/match/ $match */ diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala index f57ddce32c6..0eda4b99de2 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala @@ -56,8 +56,8 @@ package object vault { } /** - * Range options specifies index options for a Queryable Encryption field supporting "rangePreview" queries. - * + * Range options specifies index options for a Queryable Encryption field supporting "range" queries. + *

      Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. * @since 4.9 */ @Beta(Array(Reason.SERVER)) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala index 3d375b56e21..226b271ff96 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala @@ -79,13 +79,13 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos * * `\$gt` may also be `\$gte`. `\$lt` may also be `\$lte`. * - * Only supported when queryType is "rangePreview" and algorithm is "RangePreview". + * Only supported when queryType is "range" and algorithm is "Range". * * '''Note:''' The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. * * [[https://www.mongodb.com/docs/manual/core/queryable-encryption/ queryable encryption]] * - * @note Requires MongoDB 6.2 or greater + * @note Requires MongoDB 8.0 or greater * @param expression the Match Expression or Aggregate Expression * @param options the options * @return a Publisher containing the queryable encrypted range expression diff --git a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java index 6d529741a24..582cf94e044 100644 --- a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java +++ b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java @@ -98,14 +98,14 @@ public interface ClientEncryption extends Closeable { * * {@code $gt} may also be {@code $gte}. {@code $lt} may also be {@code $lte}. * - *

      Only supported when queryType is "rangePreview" and algorithm is "RangePreview". - *

      Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. + *

      Only supported when queryType is "range" and algorithm is "Range". + *

      Note: The Range algorithm is unstable. It is subject to breaking changes. * * @param expression the Match Expression or Aggregate Expression * @param options the options * @return the encrypted queryable range expression * @since 4.9 - * @mongodb.server.release 6.2 + * @mongodb.server.release 8.0 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption * @mongodb.driver.manual reference/operator/aggregation/match/ $match */ diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java index 6bb5d1d5120..061b31482ef 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java @@ -60,7 +60,6 @@ import static com.mongodb.ClusterFixture.isServerlessTest; import static com.mongodb.ClusterFixture.isStandalone; import static com.mongodb.ClusterFixture.serverVersionAtLeast; -import static com.mongodb.ClusterFixture.serverVersionLessThan; import static com.mongodb.client.Fixture.getDefaultDatabase; import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getMongoClient; @@ -92,8 +91,7 @@ public abstract class AbstractClientSideEncryptionRangeExplicitEncryptionTest { @BeforeEach public void setUp(final Type type) { - assumeTrue(serverVersionLessThan(8, 0)); - assumeTrue(serverVersionAtLeast(6, 2)); + assumeTrue(serverVersionAtLeast(8, 0)); assumeFalse(isStandalone()); assumeFalse(isServerlessTest()); @@ -139,7 +137,7 @@ public void setUp(final Type type) { .build()) .build()); - encryptOptions = new EncryptOptions("RangePreview") + encryptOptions = new EncryptOptions("Range") .keyId(key1Id) .contentionFactor(0L) .rangeOptions(type.getRangeOptions()); @@ -149,9 +147,9 @@ public void setUp(final Type type) { BsonBinary encryptedValue30 = clientEncryption.encrypt(type.convertNumber(30), encryptOptions); BsonBinary encryptedValue200 = clientEncryption.encrypt(type.convertNumber(200), encryptOptions); - encryptQueryOptions = new EncryptOptions("RangePreview") + encryptQueryOptions = new EncryptOptions("Range") .keyId(key1Id) - .queryType("rangePreview") + .queryType("range") .contentionFactor(0L) .rangeOptions(type.getRangeOptions()); @@ -292,7 +290,7 @@ void testEncryptingADocumentOfADifferentTypeErrors(final Type type) { void testSettingPrecisionErrorsIfTheTypeIsNotADouble(final Type type) { BsonValue originalValue = type == Type.INT ? new BsonDouble(6) : new BsonInt32(6); - EncryptOptions precisionEncryptOptions = new EncryptOptions("RangePreview") + EncryptOptions precisionEncryptOptions = new EncryptOptions("Range") .keyId(key1Id) .contentionFactor(0L) .rangeOptions(type.getRangeOptions().precision(2)); @@ -319,7 +317,9 @@ public String toString() { } RangeOptions getRangeOptions() { - RangeOptions rangeOptions = new RangeOptions().sparsity(1L); + RangeOptions rangeOptions = new RangeOptions() + .setTrimFactor(1) + .sparsity(1L); switch (this) { case DECIMAL_NO_PRECISION: case DOUBLE_NO_PRECISION: From 83ff78a527a32f602e524a591c273e084f3a817f Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Wed, 31 Jul 2024 17:47:00 +0100 Subject: [PATCH 54/90] Kotlin: Updated driver metadata (#1461) Added Kotlin version to the driver platform metadata. 
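For context, a minimal sketch of the metadata mechanism this change builds on: a wrapping driver appends its own name, version, and platform through MongoDriverInformation, and the core driver merges those values into the connection handshake metadata. The wrapper name, version, and platform strings below are illustrative placeholders; MongoDriverInformation.builder() and MongoClients.create(settings, driverInformation) are existing driver entry points, and the settings assume a local default deployment.

import com.mongodb.MongoClientSettings;
import com.mongodb.MongoDriverInformation;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;

public final class DriverMetadataSketch {
    public static void main(final String[] args) {
        // Hypothetical wrapper metadata; the Kotlin driver now does the equivalent,
        // additionally appending driverPlatform("kotlin/<KotlinVersion.CURRENT>").
        MongoDriverInformation driverInformation = MongoDriverInformation.builder()
                .driverName("my-wrapper")
                .driverVersion("0.1.0")
                .driverPlatform("example-platform/1.0")
                .build();

        try (MongoClient client = MongoClients.create(MongoClientSettings.builder().build(), driverInformation)) {
            client.listDatabaseNames().first(); // metadata is sent during the connection handshake
        }
    }
}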
JAVA-5539 --- .../com/mongodb/kotlin/client/coroutine/MongoClient.kt | 5 ++++- .../src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt index c4c2acc27f6..54688798987 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt @@ -88,7 +88,10 @@ public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapp val builder = if (mongoDriverInformation == null) MongoDriverInformation.builder() else MongoDriverInformation.builder(mongoDriverInformation) - return MongoClient(JMongoClients.create(settings, builder.driverName("kotlin").build())) + return MongoClient( + JMongoClients.create( + settings, + builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build())) } } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt index bdf2ba30bd5..09894c683bb 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt @@ -86,7 +86,10 @@ public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapp val builder = if (mongoDriverInformation == null) MongoDriverInformation.builder() else MongoDriverInformation.builder(mongoDriverInformation) - return MongoClient(JMongoClients.create(settings, builder.driverName("kotlin").build())) + return MongoClient( + JMongoClients.create( + settings, + builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build())) } } From 6740e9bf06df7a5de037231095723ef9e16d0189 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Wed, 31 Jul 2024 17:46:55 -0700 Subject: [PATCH 55/90] Add 'type' field support for Search Index creation. 
(#1438) JAVA-5323 --------- Co-authored-by: Valentin Kovalenko --- .../client/model/SearchIndexModel.java | 34 ++++++- .../mongodb/client/model/SearchIndexType.java | 83 ++++++++++++++++ .../client/model/SearchIndexTypeBson.java | 52 ++++++++++ .../CreateSearchIndexesOperation.java | 5 + .../internal/operation/Operations.java | 4 +- .../operation/SearchIndexRequest.java | 15 ++- .../index-management/createSearchIndex.json | 72 +++++++++++++- .../index-management/createSearchIndexes.json | 74 +++++++++++++- .../client/ListSearchIndexesPublisher.java | 2 +- .../client/MongoCollection.java | 14 +-- .../scala/ListSearchIndexesObservable.scala | 2 +- .../org/mongodb/scala/MongoCollection.scala | 12 +-- .../org/mongodb/scala/model/package.scala | 18 +++- .../client/ListSearchIndexesIterable.java | 2 +- .../com/mongodb/client/MongoCollection.java | 14 +-- ...ctAtlasSearchIndexManagementProseTest.java | 96 ++++++++++++++++++- .../client/unified/UnifiedCrudHelper.java | 33 ++++--- 17 files changed, 477 insertions(+), 55 deletions(-) create mode 100644 driver-core/src/main/com/mongodb/client/model/SearchIndexType.java create mode 100644 driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java diff --git a/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java b/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java index 124be4885c0..2a229e1a579 100644 --- a/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java +++ b/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java @@ -25,12 +25,14 @@ * A model describing the creation of a single Atlas Search index. * * @since 4.11 - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 */ public final class SearchIndexModel { @Nullable private final String name; private final Bson definition; + @Nullable + private final SearchIndexType type; /** * Construct an instance with the given Atlas Search index mapping definition. @@ -42,8 +44,7 @@ public final class SearchIndexModel { * @param definition the search index mapping definition. */ public SearchIndexModel(final Bson definition) { - this.definition = notNull("definition", definition); - this.name = null; + this(null, definition, null); } /** @@ -53,8 +54,21 @@ public SearchIndexModel(final Bson definition) { * @param definition the search index mapping definition. */ public SearchIndexModel(final String name, final Bson definition) { + this(name, definition, null); + } + + /** + * Construct an instance with the given Atlas Search name, index definition, and type. + * + * @param name the search index name. + * @param definition the search index mapping definition. + * @param type the search index type. + * @since 5.2 + */ + public SearchIndexModel(@Nullable final String name, final Bson definition, @Nullable final SearchIndexType type) { this.definition = notNull("definition", definition); - this.name = notNull("name", name); + this.name = name; + this.type = type; } /** @@ -76,11 +90,23 @@ public String getName() { return name; } + /** + * Get the Atlas Search index type. + * + * @return the search index type. + * @since 5.2 + */ + @Nullable + public SearchIndexType getType() { + return type; + } + @Override public String toString() { return "SearchIndexModel{" + "name=" + name + ", definition=" + definition + + ", type=" + (type == null ? 
"null" : type.toBsonValue()) + '}'; } } diff --git a/driver-core/src/main/com/mongodb/client/model/SearchIndexType.java b/driver-core/src/main/com/mongodb/client/model/SearchIndexType.java new file mode 100644 index 00000000000..5ed73461a05 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/SearchIndexType.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.annotations.Sealed; +import org.bson.BsonString; +import org.bson.BsonValue; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * This interface represents an Atlas Search Index type, which is utilized for creating specific types of indexes. + *

      + * It provides methods for creating and converting Atlas Search Index types to {@link BsonValue}. + *

      + * + * @mongodb.server.release 6.0 + * @see SearchIndexModel The model class that utilizes this index type. + * @since 5.2 + */ +@Sealed +public interface SearchIndexType { + + /** + * Returns a {@link SearchIndexType} instance representing the "search" index type. + * + * @return The requested {@link SearchIndexType}. + */ + static SearchIndexType search() { + return new SearchIndexTypeBson(new BsonString("search")); + } + + /** + * Returns a {@link SearchIndexType} instance representing the "vectorSearch" index type. + * + * @return The requested {@link SearchIndexType}. + */ + static SearchIndexType vectorSearch() { + return new SearchIndexTypeBson(new BsonString("vectorSearch")); + } + + /** + * Creates a {@link SearchIndexType} from a {@link BsonValue} in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

      + * Example
+     * The following code creates two functionally equivalent {@link SearchIndexType}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchIndexType type1 = SearchIndexType.vectorSearch();
+     *  SearchIndexType type2 = SearchIndexType.of(new BsonString("vectorSearch"));
+     * }</pre>
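Beyond the javadoc snippet above, a short sync-driver sketch of how the new type-aware model is intended to be used (assuming a deployment that supports Atlas Search; the connection string, namespace, and index definitions are illustrative only):

import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.SearchIndexModel;
import com.mongodb.client.model.SearchIndexType;
import org.bson.Document;

import java.util.Arrays;
import java.util.List;

public final class SearchIndexTypeSketch {
    public static void main(final String[] args) {
        MongoCollection<Document> collection = MongoClients.create("mongodb://localhost")
                .getDatabase("test")
                .getCollection("movies");

        // One classic Atlas Search index and one vector search index, each with an explicit type.
        SearchIndexModel searchModel = new SearchIndexModel(
                "default-search",
                Document.parse("{ mappings: { dynamic: true } }"),
                SearchIndexType.search());
        SearchIndexModel vectorModel = new SearchIndexModel(
                "vector-search",
                Document.parse("{ fields: [ { type: 'vector', path: 'plot_embedding',"
                        + " numDimensions: 1536, similarity: 'euclidean' } ] }"),
                SearchIndexType.vectorSearch());

        // createSearchIndexes returns the index names in the order the models were supplied.
        List<String> names = collection.createSearchIndexes(Arrays.asList(searchModel, vectorModel));
        System.out.println(names);
    }
}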
      + * + * @param indexType A {@link BsonValue} representing the required {@link SearchIndexType}. + * @return The requested {@link SearchIndexType}. + */ + static SearchIndexType of(final BsonValue indexType) { + notNull("indexType", indexType); + return new SearchIndexTypeBson(indexType); + } + + /** + * Converts this object to {@link BsonValue}. + * + * @return A {@link BsonValue} representing this {@link SearchIndexType}. + */ + BsonValue toBsonValue(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java b/driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java new file mode 100644 index 00000000000..75e8788e681 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java @@ -0,0 +1,52 @@ +package com.mongodb.client.model; + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.bson.BsonValue; + +import java.util.Objects; + +final class SearchIndexTypeBson implements SearchIndexType { + private final BsonValue bsonValue; + + SearchIndexTypeBson(final BsonValue bsonValue) { + this.bsonValue = bsonValue; + } + + @Override + public BsonValue toBsonValue() { + return bsonValue; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SearchIndexTypeBson that = (SearchIndexTypeBson) o; + return Objects.equals(bsonValue, that.bsonValue); + } + + @Override + public int hashCode() { + return Objects.hash(bsonValue); + } +} + diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java index 1a44d887586..2e52e3fa0ae 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java @@ -17,6 +17,7 @@ package com.mongodb.internal.operation; import com.mongodb.MongoNamespace; +import com.mongodb.client.model.SearchIndexType; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonString; @@ -52,6 +53,10 @@ private static BsonDocument convert(final SearchIndexRequest request) { if (searchIndexName != null) { bsonIndexRequest.append("name", new BsonString(searchIndexName)); } + SearchIndexType searchIndexType = request.getSearchIndexType(); + if (searchIndexType != null) { + bsonIndexRequest.append("type", searchIndexType.toBsonValue()); + } bsonIndexRequest.append("definition", request.getDefinition()); return bsonIndexRequest; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java index e271f23d522..5ec696b61ce 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/Operations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java @@ -48,6 +48,7 
@@ import com.mongodb.client.model.ReplaceOptions; import com.mongodb.client.model.ReturnDocument; import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.SearchIndexType; import com.mongodb.client.model.UpdateManyModel; import com.mongodb.client.model.UpdateOneModel; import com.mongodb.client.model.UpdateOptions; @@ -752,7 +753,8 @@ private List toBsonDocumentList(@Nullable final List The type of the result. * @since 4.11 - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 */ @Evolving public interface ListSearchIndexesPublisher extends Publisher { diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java index 4e17208b342..821c7723a74 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java @@ -1465,7 +1465,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * @param indexName the name of the search index to create. * @param definition Atlas Search index mapping definition. * @return a {@link Publisher} with search index name. - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes * @since 4.11 */ @@ -1476,7 +1476,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * * @param definition Atlas Search index mapping definition. * @return a {@link Publisher} with search index name. - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes * @since 4.11 */ @@ -1490,7 +1490,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * * @param searchIndexModels the search index models. * @return a {@link Publisher} with the search index names in the order specified by the given list {@link SearchIndexModel}s. - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes * @since 4.11 */ @@ -1501,7 +1501,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * @param indexName the name of the search index to update. * @param definition Atlas Search index mapping definition. * @return an empty publisher that indicates when the operation has completed. - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 * @mongodb.driver.manual reference/command/updateSearchIndex/ Update Search index * @since 4.11 */ @@ -1511,7 +1511,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * * @param indexName the name of the search index to drop. * @return an empty publisher that indicates when the operation has completed. - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 * @mongodb.driver.manual reference/command/dropSearchIndex/ Drop Search index * @since 4.11 */ @@ -1522,7 +1522,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * * @return the fluent list search indexes interface. * @since 4.11 - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 */ ListSearchIndexesPublisher listSearchIndexes(); @@ -1533,7 +1533,7 @@ Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, * @param the target document type of the iterable. 
* @return the fluent list search indexes interface. * @since 4.11 - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 */ ListSearchIndexesPublisher listSearchIndexes(Class resultClass); diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala index e1aee7dce1a..db7b687c498 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala @@ -41,7 +41,7 @@ case class ListSearchIndexesObservable[TResult](wrapped: ListSearchIndexesPublis * Sets an Atlas Search index name for this operation. * * @param indexName Atlas Search index name. - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater */ def name(indexName: String): ListSearchIndexesObservable[TResult] = { wrapped.name(indexName) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala index bdd63f9245a..48e09aa7921 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala @@ -1414,7 +1414,7 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul * @param definition the search index mapping definition. * @return an Observable with the search index name. * @since 4.11 - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater * @see [[https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/ Create Search Indexes]] */ def createSearchIndex(indexName: String, definition: Bson): SingleObservable[String] = @@ -1426,7 +1426,7 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul * @param definition the search index mapping definition. * @return an Observable with search index name. * @since 4.11 - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater * @see [[https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/ Create Search Indexes]] */ def createSearchIndex(definition: Bson): SingleObservable[String] = wrapped.createSearchIndex(definition) @@ -1441,7 +1441,7 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul * @return an Observable with the names of the search indexes * in the order specified by the given list of [[org.mongodb.scala.model.SearchIndexModel]]s. * @since 4.11 - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater * @see [[https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/ Create Search Indexes]] */ def createSearchIndexes(searchIndexModels: List[SearchIndexModel]): Observable[String] = @@ -1454,7 +1454,7 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul * @param definition the search index mapping definition. * @return an Observable that indicates when the operation has completed. 
* @since 4.11 - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater * @see [[https://www.mongodb.com/docs/manual/reference/command/updateSearchIndex/ Update Search Index]] */ def updateSearchIndex(indexName: String, definition: Bson): SingleObservable[Unit] = @@ -1466,7 +1466,7 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul * @param indexName the name of the search index to drop. * @return an Observable that indicates when the operation has completed. * @since 4.11 - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater * @see [[https://www.mongodb.com/docs/manual/reference/command/dropSearchIndex/ Drop Search Index]] */ def dropSearchIndex(indexName: String): SingleObservable[Unit] = wrapped.dropSearchIndex(indexName) @@ -1477,7 +1477,7 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul * @tparam C the target document type of the observable. * @return the fluent list search indexes interface * @since 4.11 - * @note Requires MongoDB 7.0 or greater + * @note Requires MongoDB 6.0 or greater * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes List Search Indexes]] */ def listSearchIndexes[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ListSearchIndexesObservable[C] = diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala index 111af0e6568..0d23a38c2e8 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala @@ -19,7 +19,7 @@ package org.mongodb.scala import com.mongodb.annotations.{ Beta, Reason, Sealed } import scala.collection.JavaConverters._ -import com.mongodb.client.model.{ GeoNearOptions, MongoTimeUnit => JMongoTimeUnit, WindowOutputField } +import com.mongodb.client.model.{ MongoTimeUnit => JMongoTimeUnit } import org.mongodb.scala.bson.conversions.Bson // scalastyle:off number.of.methods number.of.types @@ -481,6 +481,11 @@ package object model { */ type SearchIndexModel = com.mongodb.client.model.SearchIndexModel + /** + * Represents an Atlas Search Index type, which is utilized for creating specific types of indexes. + */ + type SearchIndexType = com.mongodb.client.model.SearchIndexType + /** * A model describing the creation of a single Atlas Search index. */ @@ -507,6 +512,17 @@ package object model { */ def apply(indexName: String, definition: Bson): SearchIndexModel = new com.mongodb.client.model.SearchIndexModel(indexName, definition) + + /** + * Construct an instance with the given search index name and definition. + * + * @param indexName the name of the search index to create. + * @param definition the search index mapping definition. + * @param indexType the search index type. 
+ * @return the SearchIndexModel + */ + def apply(indexName: Option[String], definition: Bson, indexType: Option[SearchIndexType]): SearchIndexModel = + new com.mongodb.client.model.SearchIndexModel(indexName.orNull, definition, indexType.orNull) } /** diff --git a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java index 2384fcef29d..a5579bacfd5 100644 --- a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java +++ b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java @@ -34,7 +34,7 @@ * @param The type of the result. * @mongodb.driver.manual reference/operator/aggregation/listSearchIndexes ListSearchIndexes * @since 4.11 - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 */ @Evolving public interface ListSearchIndexesIterable extends MongoIterable { diff --git a/driver-sync/src/main/com/mongodb/client/MongoCollection.java b/driver-sync/src/main/com/mongodb/client/MongoCollection.java index 7db38040bed..0d3248b613f 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoCollection.java +++ b/driver-sync/src/main/com/mongodb/client/MongoCollection.java @@ -1751,7 +1751,7 @@ BulkWriteResult bulkWrite(ClientSession clientSession, List listSearchIndexes(); @@ -1819,7 +1819,7 @@ BulkWriteResult bulkWrite(ClientSession clientSession, List the target document type of the iterable. * @return the list search indexes iterable interface. * @since 4.11 - * @mongodb.server.release 7.0 + * @mongodb.server.release 6.0 */ ListSearchIndexesIterable listSearchIndexes(Class resultClass); diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java index fd7bc428576..17c007e14ba 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java @@ -17,9 +17,11 @@ package com.mongodb.client; import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; import com.mongodb.ReadConcern; import com.mongodb.WriteConcern; import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.SearchIndexType; import com.mongodb.event.CommandListener; import com.mongodb.event.CommandStartedEvent; import org.bson.BsonDocument; @@ -32,7 +34,6 @@ import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.UUID; @@ -46,11 +47,15 @@ import static com.mongodb.assertions.Assertions.assertFalse; import static com.mongodb.client.Fixture.getMongoClientSettings; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; /** - * See Search Index Management Tests + * See Search Index Management Tests */ public abstract class AbstractAtlasSearchIndexManagementProseTest { /** @@ -74,6 +79,18 @@ public abstract class AbstractAtlasSearchIndexManagementProseTest { "{" + " mappings: { dynamic: true }" + "}"); + private static final 
Document VECTOR_SEARCH_DEFINITION = Document.parse( + "{" + + " fields: [" + + " {" + + " type: 'vector'," + + " path: 'plot_embedding'," + + " numDimensions: 1536," + + " similarity: 'euclidean'," + + " }," + + " ]" + + "}"); + private MongoClient client = createMongoClient(getMongoClientSettings()); private MongoDatabase db; private MongoCollection collection; @@ -153,7 +170,7 @@ public void shouldCreateMultipleIndexesInBatch() throws InterruptedException { SearchIndexModel searchIndexModel2 = new SearchIndexModel(TEST_SEARCH_INDEX_NAME_2, NOT_DYNAMIC_MAPPING_DEFINITION); //when - List searchIndexes = collection.createSearchIndexes(Arrays.asList(searchIndexModel1, searchIndexModel2)); + List searchIndexes = collection.createSearchIndexes(asList(searchIndexModel1, searchIndexModel2)); //then assertThat(searchIndexes, contains(TEST_SEARCH_INDEX_NAME_1, TEST_SEARCH_INDEX_NAME_2)); @@ -200,6 +217,69 @@ public void shouldSuppressNamespaceErrorWhenDroppingIndexWithoutCollection() { collection.dropSearchIndex("not existent index"); } + @Test + @DisplayName("Case 7 implicit: Driver can successfully handle search index types when creating indexes") + public void shouldHandleImplicitSearchIndexTypes() throws InterruptedException { + //given + String indexName = "test-search-index-case7-implicit"; + + //when + String result = collection.createSearchIndex( + indexName, + NOT_DYNAMIC_MAPPING_DEFINITION); + + //then + assertEquals(indexName, result); + awaitIndexChanges(isQueryable().and(hasSearchIndexType()), new SearchIndexModel(indexName, NOT_DYNAMIC_MAPPING_DEFINITION)); + } + + @Test + @DisplayName("Case 7 explicit 'search' type: Driver can successfully handle search index types when creating indexes") + public void shouldHandleExplicitSearchIndexTypes() throws InterruptedException { + //given + String indexName = "test-search-index-case7-explicit"; + + //when + List searchIndexes = collection.createSearchIndexes(singletonList(new SearchIndexModel( + indexName, + NOT_DYNAMIC_MAPPING_DEFINITION, + SearchIndexType.search()))); + + //then + assertEquals(1, searchIndexes.size()); + assertEquals(indexName, searchIndexes.get(0)); + awaitIndexChanges(isQueryable().and(hasSearchIndexType()), new SearchIndexModel(indexName, NOT_DYNAMIC_MAPPING_DEFINITION)); + } + + @Test + @DisplayName("Case 7 explicit 'vectorSearch' type: Driver can successfully handle search index types when creating indexes") + public void shouldHandleExplicitVectorSearchIndexTypes() throws InterruptedException { + //given + String indexName = "test-search-index-case7-vector"; + + //when + List searchIndexes = collection.createSearchIndexes(singletonList(new SearchIndexModel( + indexName, + VECTOR_SEARCH_DEFINITION, + SearchIndexType.vectorSearch()))); + + //then + assertEquals(1, searchIndexes.size()); + assertEquals(indexName, searchIndexes.get(0)); + awaitIndexChanges(isQueryable().and(hasVectorSearchIndexType()), new SearchIndexModel(indexName, NOT_DYNAMIC_MAPPING_DEFINITION)); + } + + @Test + @DisplayName("Case 8: Driver requires explicit type to create a vector search index") + public void shouldRequireExplicitTypeToCreateVectorSearchIndex() { + //given + String indexName = "test-search-index-case8-error"; + + //when & then + assertThrows(MongoCommandException.class, () -> collection.createSearchIndex( + indexName, + VECTOR_SEARCH_DEFINITION)); + } private void assertIndexDeleted() throws InterruptedException { int attempts = MAX_WAIT_ATTEMPTS; @@ -250,6 +330,16 @@ private Predicate isReady() { } + private Predicate 
hasSearchIndexType() { + return document -> "search".equals(document.getString("type")); + } + + private Predicate hasVectorSearchIndexType() { + return document -> "vectorSearch".equals(document.getString("type")); + } + + + private boolean checkAttempt(final int attempt) { Assertions.assertFalse(attempt <= 0, "Exceeded maximum attempts waiting for Search Index changes in Atlas cluster"); return true; diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java index 67f95903997..041f016510f 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java @@ -68,6 +68,7 @@ import com.mongodb.client.model.ReplaceOptions; import com.mongodb.client.model.ReturnDocument; import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.SearchIndexType; import com.mongodb.client.model.TimeSeriesGranularity; import com.mongodb.client.model.TimeSeriesOptions; import com.mongodb.client.model.UpdateManyModel; @@ -99,6 +100,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -1508,19 +1510,24 @@ OperationResult executeCreateSearchIndex(final BsonDocument operation) { MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); BsonDocument model = arguments.getDocument("model"); - BsonDocument definition = model.getDocument("definition"); return resultOf(() -> { - if (model.containsKey("name")) { - String name = model.getString("name").getValue(); - collection.createSearchIndex(name, definition); - } else { - collection.createSearchIndex(definition); - } + collection.createSearchIndexes(Collections.singletonList(toIndexSearchModel(model))); return null; }); } + private static SearchIndexType getSearchIndexType(final BsonString type) { + switch (type.getValue()) { + case "search": + return SearchIndexType.search(); + case "vectorSearch": + return SearchIndexType.vectorSearch(); + default: + throw new UnsupportedOperationException("Unsupported search index type: " + type.getValue()); + } + } + OperationResult executeCreateSearchIndexes(final BsonDocument operation) { MongoCollection collection = getMongoCollection(operation); BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); @@ -1561,14 +1568,12 @@ OperationResult executeDropSearchIndex(final BsonDocument operation) { private static SearchIndexModel toIndexSearchModel(final BsonValue bsonValue) { BsonDocument model = bsonValue.asDocument(); - String name; BsonDocument definition = model.getDocument("definition"); - if (model.containsKey("name")) { - name = model.getString("name").getValue(); - return new SearchIndexModel(name, definition); - } else { - return new SearchIndexModel(definition); - } + SearchIndexType type = model.containsKey("type") ? getSearchIndexType(model.getString("type")) : null; + String name = Optional.ofNullable(model.getString("name", null)) + .map(BsonString::getValue). 
+ orElse(null); + return new SearchIndexModel(name, definition, type); } From f443751e917db808b93c46a03e734841e18d0d0e Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Thu, 1 Aug 2024 08:19:03 +0100 Subject: [PATCH 56/90] Ensure Sink.contextView is propagated (#1450) Context view is propagated via the subscriber, so any nested subscribe calls need to have the context passed through. JAVA-5345 --- .../client/internal/BatchCursorFlux.java | 11 +- .../client/internal/BatchCursorPublisher.java | 18 ++- .../client/internal/crypt/Crypt.java | 5 + .../gridfs/GridFSUploadPublisherImpl.java | 107 +++++++----------- 4 files changed, 56 insertions(+), 85 deletions(-) diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java index 90bbe9ed0a4..119598a265b 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java @@ -18,11 +18,9 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; -import reactor.core.CoreSubscriber; import reactor.core.publisher.Flux; import reactor.core.publisher.FluxSink; import reactor.core.publisher.Mono; -import reactor.util.context.Context; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -48,9 +46,9 @@ public void subscribe(final Subscriber subscriber) { if (calculateDemand(demand) > 0 && inProgress.compareAndSet(false, true)) { if (batchCursor == null) { int batchSize = calculateBatchSize(sink.requestedFromDownstream()); - Context initialContext = subscriber instanceof CoreSubscriber - ? 
((CoreSubscriber) subscriber).currentContext() : null; - batchCursorPublisher.batchCursor(batchSize).subscribe(bc -> { + batchCursorPublisher.batchCursor(batchSize) + .contextWrite(sink.contextView()) + .subscribe(bc -> { batchCursor = bc; inProgress.set(false); @@ -60,7 +58,7 @@ public void subscribe(final Subscriber subscriber) { } else { recurseCursor(); } - }, sink::error, null, initialContext); + }, sink::error); } else { inProgress.set(false); recurseCursor(); @@ -86,6 +84,7 @@ private void recurseCursor(){ } else { batchCursor.setBatchSize(calculateBatchSize(sink.requestedFromDownstream())); Mono.from(batchCursor.next(() -> sink.isCancelled())) + .contextWrite(sink.contextView()) .doOnCancel(this::closeCursor) .subscribe(results -> { if (!results.isEmpty()) { diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java index cf5a9d9f25b..13ee27f002f 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java @@ -123,21 +123,17 @@ public TimeoutMode getTimeoutMode() { public Publisher first() { return batchCursor(this::asAsyncFirstReadOperation) - .flatMap(batchCursor -> Mono.create(sink -> { + .flatMap(batchCursor -> { batchCursor.setBatchSize(1); - Mono.from(batchCursor.next()) + return Mono.from(batchCursor.next()) .doOnTerminate(batchCursor::close) - .doOnError(sink::error) - .doOnSuccess(results -> { + .flatMap(results -> { if (results == null || results.isEmpty()) { - sink.success(); - } else { - sink.success(results.get(0)); + return Mono.empty(); } - }) - .contextWrite(sink.contextView()) - .subscribe(); - })); + return Mono.fromCallable(() -> results.get(0)); + }); + }); } @Override diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java index 6d5aca27457..13d9373a3ff 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java @@ -306,6 +306,7 @@ private void collInfo(final MongoCryptContext cryptContext, sink.error(new IllegalStateException("Missing database name")); } else { collectionInfoRetriever.filter(databaseName, cryptContext.getMongoOperation(), operationTimeout) + .contextWrite(sink.contextView()) .doOnSuccess(result -> { if (result != null) { cryptContext.addMongoOperationResult(result); @@ -328,6 +329,7 @@ private void mark(final MongoCryptContext cryptContext, sink.error(wrapInClientException(new IllegalStateException("Missing database name"))); } else { commandMarker.mark(databaseName, cryptContext.getMongoOperation(), operationTimeout) + .contextWrite(sink.contextView()) .doOnSuccess(result -> { cryptContext.addMongoOperationResult(result); cryptContext.completeMongoOperation(); @@ -343,6 +345,7 @@ private void fetchKeys(final MongoCryptContext cryptContext, final MonoSink sink, @Nullable final Timeout operationTimeout) { keyRetriever.find(cryptContext.getMongoOperation(), operationTimeout) + .contextWrite(sink.contextView()) .doOnSuccess(results -> { for (BsonDocument result : results) { cryptContext.addMongoOperationResult(result); @@ -361,11 +364,13 
@@ private void decryptKeys(final MongoCryptContext cryptContext, MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor(); if (keyDecryptor != null) { keyManagementService.decryptKey(keyDecryptor, operationTimeout) + .contextWrite(sink.contextView()) .doOnSuccess(r -> decryptKeys(cryptContext, databaseName, sink, operationTimeout)) .doOnError(e -> sink.error(wrapInClientException(e))) .subscribe(); } else { Mono.fromRunnable(cryptContext::completeKeyDecryptors) + .contextWrite(sink.contextView()) .doOnSuccess(r -> executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout)) .doOnError(e -> sink.error(wrapInClientException(e))) .subscribe(); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java index a45d369c676..7d9a46cdf3f 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java @@ -40,9 +40,6 @@ import java.util.Date; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Function; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.notNull; @@ -106,7 +103,7 @@ public BsonValue getId() { @Override public void subscribe(final Subscriber s) { - Mono.defer(() -> { + Mono.deferContextual(ctx -> { AtomicBoolean terminated = new AtomicBoolean(false); Timeout timeout = TimeoutContext.startTimeout(timeoutMs); return createCheckAndCreateIndexesMono(timeout) @@ -120,7 +117,7 @@ public void subscribe(final Subscriber s) { return originalError; }) .then(Mono.error(originalError))) - .doOnCancel(() -> createCancellationMono(terminated, timeout).subscribe()) + .doOnCancel(() -> createCancellationMono(terminated, timeout).contextWrite(ctx).subscribe()) .then(); }).subscribe(s); } @@ -149,38 +146,15 @@ public void subscribe(final Subscriber subscriber) { } private Mono createCheckAndCreateIndexesMono(@Nullable final Timeout timeout) { - AtomicBoolean collectionExists = new AtomicBoolean(false); - return Mono.create(sink -> findAllInCollection(filesCollection, timeout).subscribe( - d -> collectionExists.set(true), - sink::error, - () -> { - if (collectionExists.get()) { - sink.success(); - } else { - checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX, timeout) - .doOnSuccess(i -> checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX, timeout) - .subscribe(unused -> {}, sink::error, sink::success)) - .subscribe(unused -> {}, sink::error); - } - }) - ); - } - - private Mono findAllInCollection(final MongoCollection collection, @Nullable final Timeout timeout) { - return collectionWithTimeoutDeferred(collection - .withDocumentClass(Document.class) - .withReadPreference(primary()), timeout) - .flatMap(wrappedCollection -> { - if (clientSession != null) { - return Mono.from(wrappedCollection.find(clientSession) - .projection(PROJECTION) - .first()); - } else { - return Mono.from(wrappedCollection.find() - .projection(PROJECTION) - .first()); - } - }); + return 
collectionWithTimeoutDeferred(filesCollection.withDocumentClass(Document.class).withReadPreference(primary()), timeout) + .map(collection -> clientSession != null ? collection.find(clientSession) : collection.find()) + .flatMap(findPublisher -> Mono.from(findPublisher.projection(PROJECTION).first())) + .switchIfEmpty(Mono.defer(() -> + checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX, timeout) + .then(checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX, timeout)) + .then(Mono.empty()) + )) + .then(); } private Mono hasIndex(final MongoCollection collection, final Document index, @Nullable final Timeout timeout) { @@ -228,40 +202,37 @@ private Mono createIndexMono(final MongoCollection collection, fi } private Mono createSaveChunksMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) { - return Mono.create(sink -> { - AtomicLong lengthInBytes = new AtomicLong(0); - AtomicInteger chunkIndex = new AtomicInteger(0); - new ResizingByteBufferFlux(source, chunkSizeBytes) - .takeUntilOther(createMonoTimer(timeout)) - .flatMap((Function>) byteBuffer -> { - if (terminated.get()) { - return Mono.empty(); - } - byte[] byteArray = new byte[byteBuffer.remaining()]; - if (byteBuffer.hasArray()) { - System.arraycopy(byteBuffer.array(), byteBuffer.position(), byteArray, 0, byteBuffer.remaining()); - } else { - byteBuffer.mark(); - byteBuffer.get(byteArray); - byteBuffer.reset(); - } - Binary data = new Binary(byteArray); - lengthInBytes.addAndGet(data.length()); + return new ResizingByteBufferFlux(source, chunkSizeBytes) + .takeUntilOther(createMonoTimer(timeout)) + .index() + .flatMap(indexAndBuffer -> { + if (terminated.get()) { + return Mono.empty(); + } + Long index = indexAndBuffer.getT1(); + ByteBuffer byteBuffer = indexAndBuffer.getT2(); + byte[] byteArray = new byte[byteBuffer.remaining()]; + if (byteBuffer.hasArray()) { + System.arraycopy(byteBuffer.array(), byteBuffer.position(), byteArray, 0, byteBuffer.remaining()); + } else { + byteBuffer.mark(); + byteBuffer.get(byteArray); + byteBuffer.reset(); + } + Binary data = new Binary(byteArray); - Document chunkDocument = new Document("files_id", fileId) - .append("n", chunkIndex.getAndIncrement()) - .append("data", data); + Document chunkDocument = new Document("files_id", fileId) + .append("n", index.intValue()) + .append("data", data); - if (clientSession == null) { - return collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(chunkDocument); - } else { - return collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(clientSession, - chunkDocument); - } + Publisher insertOnePublisher = clientSession == null + ? 
collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(chunkDocument) + : collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE) + .insertOne(clientSession, chunkDocument); - }) - .subscribe(null, sink::error, () -> sink.success(lengthInBytes.get())); - }); + return Mono.from(insertOnePublisher).thenReturn(data.length()); + }) + .reduce(0L, Long::sum); } /** From 4df110886862615eab7dc02be559138385234806 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Thu, 1 Aug 2024 08:20:29 +0100 Subject: [PATCH 57/90] Added Bson-Kotlin Array Codec (#1457) Adds Kotlin array support to the bson-kotlin library JAVA-5122 Co-authored-by: Viacheslav Babanin --- .../org/bson/codecs/kotlin/ArrayCodec.kt | 128 ++++++++++++++++++ .../bson/codecs/kotlin/ArrayCodecProvider.kt | 31 +++++ .../org/bson/codecs/kotlin/DataClassCodec.kt | 7 +- .../bson/codecs/kotlin/DataClassCodecTest.kt | 57 +++++++- .../bson/codecs/kotlin/samples/DataClasses.kt | 85 ++++++++++++ .../main/com/mongodb/KotlinCodecProvider.java | 6 +- 6 files changed, 310 insertions(+), 4 deletions(-) create mode 100644 bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt create mode 100644 bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt new file mode 100644 index 00000000000..10ea90aee1b --- /dev/null +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt @@ -0,0 +1,128 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlin + +import java.lang.reflect.ParameterizedType +import java.lang.reflect.Type +import kotlin.reflect.KClass +import org.bson.BsonReader +import org.bson.BsonType +import org.bson.BsonWriter +import org.bson.codecs.Codec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecRegistry + +@Suppress("UNCHECKED_CAST") +internal data class ArrayCodec(private val kClass: KClass, private val codec: Codec) : Codec { + + companion object { + internal fun create( + kClass: KClass, + typeArguments: List, + codecRegistry: CodecRegistry + ): Codec { + assert(kClass.javaObjectType.isArray) { "$kClass must be an array type" } + val (valueClass, nestedTypes) = + if (typeArguments.isEmpty()) { + Pair(kClass.java.componentType.kotlin.javaObjectType as Class, emptyList()) + } else { + // Unroll the actual class and any type arguments + when (val pType = typeArguments[0]) { + is Class<*> -> Pair(pType as Class, emptyList()) + is ParameterizedType -> Pair(pType.rawType as Class, pType.actualTypeArguments.toList()) + else -> Pair(Object::class.java as Class, emptyList()) + } + } + val codec = + if (nestedTypes.isEmpty()) codecRegistry.get(valueClass) else codecRegistry.get(valueClass, nestedTypes) + return ArrayCodec(kClass, codec) + } + } + + private val isPrimitiveArray = kClass.java.componentType != kClass.java.componentType.kotlin.javaObjectType + + override fun encode(writer: BsonWriter, arrayValue: R, encoderContext: EncoderContext) { + writer.writeStartArray() + + boxed(arrayValue).forEach { + if (it == null) writer.writeNull() else encoderContext.encodeWithChildContext(codec, writer, it) + } + + writer.writeEndArray() + } + + override fun getEncoderClass(): Class = kClass.java + + override fun decode(reader: BsonReader, decoderContext: DecoderContext): R { + reader.readStartArray() + val data = ArrayList() + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (reader.currentBsonType == BsonType.NULL) { + reader.readNull() + data.add(null) + } else { + data.add(decoderContext.decodeWithChildContext(codec, reader)) + } + } + reader.readEndArray() + return unboxed(data) + } + + fun boxed(arrayValue: R): Iterable { + val boxedValue = + if (!isPrimitiveArray) { + (arrayValue as Array).asIterable() + } else if (arrayValue is BooleanArray) { + arrayValue.asIterable() + } else if (arrayValue is ByteArray) { + arrayValue.asIterable() + } else if (arrayValue is CharArray) { + arrayValue.asIterable() + } else if (arrayValue is DoubleArray) { + arrayValue.asIterable() + } else if (arrayValue is FloatArray) { + arrayValue.asIterable() + } else if (arrayValue is IntArray) { + arrayValue.asIterable() + } else if (arrayValue is LongArray) { + arrayValue.asIterable() + } else if (arrayValue is ShortArray) { + arrayValue.asIterable() + } else { + throw IllegalArgumentException("Unsupported array type ${arrayValue.javaClass}") + } + return boxedValue as Iterable + } + + private fun unboxed(data: ArrayList): R { + return when (kClass) { + BooleanArray::class -> (data as ArrayList).toBooleanArray() as R + ByteArray::class -> (data as ArrayList).toByteArray() as R + CharArray::class -> (data as ArrayList).toCharArray() as R + DoubleArray::class -> (data as ArrayList).toDoubleArray() as R + FloatArray::class -> (data as ArrayList).toFloatArray() as R + IntArray::class -> (data as ArrayList).toIntArray() as R + LongArray::class -> (data as ArrayList).toLongArray() as R + ShortArray::class -> (data as 
ArrayList).toShortArray() as R + else -> data.toArray(arrayOfNulls(data.size)) as R + } + } + + private fun arrayOfNulls(size: Int): Array { + return java.lang.reflect.Array.newInstance(codec.encoderClass, size) as Array + } +} diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt new file mode 100644 index 00000000000..eccb5b88b27 --- /dev/null +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlin + +import java.lang.reflect.Type +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecProvider +import org.bson.codecs.configuration.CodecRegistry + +/** A Kotlin reflection based Codec Provider for data classes */ +public class ArrayCodecProvider : CodecProvider { + override fun get(clazz: Class, registry: CodecRegistry): Codec? = get(clazz, emptyList(), registry) + + override fun get(clazz: Class, typeArguments: List, registry: CodecRegistry): Codec? = + if (clazz.isArray) { + ArrayCodec.create(clazz.kotlin, typeArguments, registry) + } else null +} diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt index 5431a765d48..85e705cb8c0 100644 --- a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt @@ -210,7 +210,7 @@ internal data class DataClassCodec( is KTypeParameter -> { when (val pType = typeMap[kParameter.type.classifier] ?: kParameter.type.javaType) { is Class<*> -> - codecRegistry.getCodec(kParameter, (pType as Class).kotlin.javaObjectType, emptyList()) + codecRegistry.getCodec(kParameter, (pType as Class).kotlin.java, emptyList()) is ParameterizedType -> codecRegistry.getCodec( kParameter, @@ -235,11 +235,14 @@ internal data class DataClassCodec( @Suppress("UNCHECKED_CAST") private fun CodecRegistry.getCodec(kParameter: KParameter, clazz: Class, types: List): Codec { val codec = - if (types.isEmpty()) { + if (clazz.isArray) { + ArrayCodec.create(clazz.kotlin, types, this) + } else if (types.isEmpty()) { this.get(clazz) } else { this.get(clazz, types) } + return kParameter.findAnnotation()?.let { if (codec !is RepresentationConfigurable<*>) { throw CodecConfigurationException( diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt index e3cfe530705..c203a5d2358 100644 --- a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt @@ -37,6 +37,7 @@ import org.bson.codecs.kotlin.samples.DataClassSealedA import org.bson.codecs.kotlin.samples.DataClassSealedB import 
org.bson.codecs.kotlin.samples.DataClassSealedC import org.bson.codecs.kotlin.samples.DataClassSelfReferential +import org.bson.codecs.kotlin.samples.DataClassWithArrays import org.bson.codecs.kotlin.samples.DataClassWithBooleanMapKey import org.bson.codecs.kotlin.samples.DataClassWithBsonConstructor import org.bson.codecs.kotlin.samples.DataClassWithBsonDiscriminator @@ -56,6 +57,7 @@ import org.bson.codecs.kotlin.samples.DataClassWithListThatLastItemDefaultsToNul import org.bson.codecs.kotlin.samples.DataClassWithMutableList import org.bson.codecs.kotlin.samples.DataClassWithMutableMap import org.bson.codecs.kotlin.samples.DataClassWithMutableSet +import org.bson.codecs.kotlin.samples.DataClassWithNativeArrays import org.bson.codecs.kotlin.samples.DataClassWithNestedParameterized import org.bson.codecs.kotlin.samples.DataClassWithNestedParameterizedDataClass import org.bson.codecs.kotlin.samples.DataClassWithNullableGeneric @@ -112,6 +114,59 @@ class DataClassCodecTest { assertRoundTrips(expected, dataClass) } + @Test + fun testDataClassWithArrays() { + val expected = + """{ + | "arraySimple": ["a", "b", "c", "d"], + | "nestedArrays": [["e", "f"], [], ["g", "h"]], + | "arrayOfMaps": [{"A": ["aa"], "B": ["bb"]}, {}, {"C": ["cc", "ccc"]}], + |}""" + .trimMargin() + + val dataClass = + DataClassWithArrays( + arrayOf("a", "b", "c", "d"), + arrayOf(arrayOf("e", "f"), emptyArray(), arrayOf("g", "h")), + arrayOf( + mapOf("A" to arrayOf("aa"), "B" to arrayOf("bb")), emptyMap(), mapOf("C" to arrayOf("cc", "ccc")))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNativeArrays() { + val expected = + """{ + | "booleanArray": [true, false], + | "byteArray": [1, 2], + | "charArray": ["a", "b"], + | "doubleArray": [ 1.1, 2.2, 3.3], + | "floatArray": [1.0, 2.0, 3.0], + | "intArray": [10, 20, 30, 40], + | "longArray": [{ "$numberLong": "111" }, { "$numberLong": "222" }, { "$numberLong": "333" }], + | "shortArray": [1, 2, 3], + | "listOfArrays": [[true, false], [false, true]], + | "mapOfArrays": {"A": [1, 2], "B":[], "C": [3, 4]} + |}""" + .trimMargin() + + val dataClass = + DataClassWithNativeArrays( + booleanArrayOf(true, false), + byteArrayOf(1, 2), + charArrayOf('a', 'b'), + doubleArrayOf(1.1, 2.2, 3.3), + floatArrayOf(1.0f, 2.0f, 3.0f), + intArrayOf(10, 20, 30, 40), + longArrayOf(111, 222, 333), + shortArrayOf(1, 2, 3), + listOf(booleanArrayOf(true, false), booleanArrayOf(false, true)), + mapOf(Pair("A", intArrayOf(1, 2)), Pair("B", intArrayOf()), Pair("C", intArrayOf(3, 4)))) + + assertRoundTrips(expected, dataClass) + } + @Test fun testDataClassWithDefaults() { val expectedDefault = @@ -534,5 +589,5 @@ class DataClassCodecTest { assertEquals(expected, decoded) } - private fun registry() = fromProviders(DataClassCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY) + private fun registry() = fromProviders(ArrayCodecProvider(), DataClassCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY) } diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt index aa2c8983b1d..77483cc9ee7 100644 --- a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt @@ -49,6 +49,91 @@ data class DataClassWithCollections( val mapMap: Map> ) +data class DataClassWithArrays( + val arraySimple: Array, + val nestedArrays: Array>, + val arrayOfMaps: Array>> +) { + override fun equals(other: Any?): Boolean { + if 
(this === other) return true + if (javaClass != other?.javaClass) return false + + other as DataClassWithArrays + + if (!arraySimple.contentEquals(other.arraySimple)) return false + if (!nestedArrays.contentDeepEquals(other.nestedArrays)) return false + + if (arrayOfMaps.size != other.arrayOfMaps.size) return false + arrayOfMaps.forEachIndexed { i, map -> + val otherMap = other.arrayOfMaps[i] + if (map.keys != otherMap.keys) return false + map.keys.forEach { key -> if (!map[key].contentEquals(otherMap[key])) return false } + } + + return true + } + + override fun hashCode(): Int { + var result = arraySimple.contentHashCode() + result = 31 * result + nestedArrays.contentDeepHashCode() + result = 31 * result + arrayOfMaps.contentHashCode() + return result + } +} + +data class DataClassWithNativeArrays( + val booleanArray: BooleanArray, + val byteArray: ByteArray, + val charArray: CharArray, + val doubleArray: DoubleArray, + val floatArray: FloatArray, + val intArray: IntArray, + val longArray: LongArray, + val shortArray: ShortArray, + val listOfArrays: List, + val mapOfArrays: Map +) { + + @SuppressWarnings("ComplexMethod") + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (javaClass != other?.javaClass) return false + + other as DataClassWithNativeArrays + + if (!booleanArray.contentEquals(other.booleanArray)) return false + if (!byteArray.contentEquals(other.byteArray)) return false + if (!charArray.contentEquals(other.charArray)) return false + if (!doubleArray.contentEquals(other.doubleArray)) return false + if (!floatArray.contentEquals(other.floatArray)) return false + if (!intArray.contentEquals(other.intArray)) return false + if (!longArray.contentEquals(other.longArray)) return false + if (!shortArray.contentEquals(other.shortArray)) return false + + if (listOfArrays.size != other.listOfArrays.size) return false + listOfArrays.forEachIndexed { i, value -> if (!value.contentEquals(other.listOfArrays[i])) return false } + + if (mapOfArrays.keys != other.mapOfArrays.keys) return false + mapOfArrays.keys.forEach { key -> if (!mapOfArrays[key].contentEquals(other.mapOfArrays[key])) return false } + + return true + } + + override fun hashCode(): Int { + var result = booleanArray.contentHashCode() + result = 31 * result + byteArray.contentHashCode() + result = 31 * result + charArray.contentHashCode() + result = 31 * result + doubleArray.contentHashCode() + result = 31 * result + floatArray.contentHashCode() + result = 31 * result + intArray.contentHashCode() + result = 31 * result + longArray.contentHashCode() + result = 31 * result + shortArray.contentHashCode() + result = 31 * result + listOfArrays.hashCode() + result = 31 * result + mapOfArrays.hashCode() + return result + } +} + data class DataClassWithDefaults( val boolean: Boolean = false, val string: String = "String", diff --git a/driver-core/src/main/com/mongodb/KotlinCodecProvider.java b/driver-core/src/main/com/mongodb/KotlinCodecProvider.java index 5a1a2f84645..d3bc5fc5604 100644 --- a/driver-core/src/main/com/mongodb/KotlinCodecProvider.java +++ b/driver-core/src/main/com/mongodb/KotlinCodecProvider.java @@ -19,6 +19,7 @@ import org.bson.codecs.Codec; import org.bson.codecs.configuration.CodecProvider; import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.kotlin.ArrayCodecProvider; import org.bson.codecs.kotlin.DataClassCodecProvider; import org.bson.codecs.kotlinx.KotlinSerializerCodecProvider; @@ -26,6 +27,9 @@ import java.util.Collections; import java.util.List; + 
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + /** * A CodecProvider for Kotlin data classes. * Delegates to {@code org.bson.codecs.kotlinx.KotlinSerializerCodecProvider} @@ -56,7 +60,7 @@ public class KotlinCodecProvider implements CodecProvider { possibleCodecProvider = null; try { Class.forName("org.bson.codecs.kotlin.DataClassCodecProvider"); // Kotlin bson canary test - possibleCodecProvider = new DataClassCodecProvider(); + possibleCodecProvider = fromProviders(new ArrayCodecProvider(), new DataClassCodecProvider()); } catch (ClassNotFoundException e) { // No kotlin data class support } From f3b42ebc1b813b76f22ce8b3b4c8a59dbd4c81ef Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Thu, 1 Aug 2024 13:52:03 +0100 Subject: [PATCH 58/90] Kotlin Spotless fix (#1468) JAVA-5539 --- .../kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt | 3 +-- .../src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt index 54688798987..68b937588d9 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt @@ -90,8 +90,7 @@ public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapp else MongoDriverInformation.builder(mongoDriverInformation) return MongoClient( JMongoClients.create( - settings, - builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build())) + settings, builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build())) } } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt index 09894c683bb..4d8d2f26cc0 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt @@ -88,8 +88,7 @@ public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapp else MongoDriverInformation.builder(mongoDriverInformation) return MongoClient( JMongoClients.create( - settings, - builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build())) + settings, builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build())) } } From cc90a512031a92b42743fa38098837d0af4c4c3f Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Thu, 1 Aug 2024 14:53:18 -0700 Subject: [PATCH 59/90] Ensure exception propagation in async try-catch block. 
(#1466) JAVA-5558 --- .../internal/connection/InternalStreamConnection.java | 8 ++++++-- .../InternalStreamConnectionSpecification.groovy | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java index 8c1b273c52b..98e43fe5fbe 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java @@ -689,8 +689,12 @@ public void sendMessageAsync( }).thenRunTryCatchAsyncBlocks(c -> { stream.writeAsync(byteBuffers, operationContext, c.asHandler()); }, Exception.class, (e, c) -> { - close(); - throwTranslatedWriteException(e, operationContext); + try { + close(); + throwTranslatedWriteException(e, operationContext); + } catch (Throwable translatedException) { + c.completeExceptionally(translatedException); + } }).finish(errorHandlingCallback(callback, LOGGER)); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy index 7a0dca34526..023b8f60079 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy @@ -1219,7 +1219,7 @@ class InternalStreamConnectionSpecification extends Specification { try { rcvdCallbck.get() false - } catch (MongoSocketWriteException) { + } catch (MongoSocketWriteException e) { true } } From a95b9cfb1b0cea9d29b12c60df0d83542a5c6b2f Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Fri, 2 Aug 2024 17:36:34 -0700 Subject: [PATCH 60/90] Remove assume from Client Side Encryption tests. (#1469) --- .../com/mongodb/client/AbstractClientSideEncryptionTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java index 87341a795ec..dca25078b7c 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java @@ -312,7 +312,6 @@ public void cleanUp() { @Test public void shouldPassAllOutcomes() { - assumeTrue("Skipping timeoutMS tests", filename.startsWith("timeoutMS.")); for (BsonValue cur : definition.getArray("operations")) { BsonDocument operation = cur.asDocument(); String operationName = operation.getString("name").getValue(); From da2ad6067de969fa54706757db70bf927e56dc08 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Wed, 7 Aug 2024 17:20:10 +0100 Subject: [PATCH 61/90] Connection String (#1467) Don't output the host and port information if the port is invalid. Reduces risk of leaking password information if the password has not been correctly urlencoded. 
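Illustrative sketch, not part of this patch: a standalone example of the behaviour asserted by the new unescapedPasswordsShouldNotBeLeakedInExceptionMessages test below. The connection string is taken from that test; the surrounding class and method names are invented for the example.

import com.mongodb.ConnectionString;

public final class UnescapedPasswordExample {
    public static void main(final String[] args) {
        // "bar/" was intended as the password, but the unescaped '/' ends the
        // user-and-host section early, so "foo:bar" is parsed as host:port and
        // "bar" fails the numeric port check.
        try {
            new ConnectionString("mongodb://foo:bar/@hostname/java?");
        } catch (IllegalArgumentException e) {
            // With this change the message describes the failure (non-digit port,
            // RFC 3986 escaping hint) without echoing the host/port fragment,
            // so the would-be password does not leak into logs or stack traces.
            System.out.println(e.getMessage());
        }
    }
}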
JAVA-5560 --- .../src/main/com/mongodb/ConnectionString.java | 18 ++++++++---------- .../com/mongodb/ConnectionStringUnitTest.java | 18 ++++++++++++++++++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java index f779ab7290d..69db84eb072 100644 --- a/driver-core/src/main/com/mongodb/ConnectionString.java +++ b/driver-core/src/main/com/mongodb/ConnectionString.java @@ -1209,7 +1209,7 @@ private List parseHosts(final List rawHosts) { } int idx = host.indexOf("]:"); if (idx != -1) { - validatePort(host, host.substring(idx + 2)); + validatePort(host.substring(idx + 2)); } } else { int colonCount = countOccurrences(host, ":"); @@ -1218,7 +1218,7 @@ private List parseHosts(final List rawHosts) { + "Reserved characters such as ':' must be escaped according RFC 2396. " + "Any IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732.", host)); } else if (colonCount == 1) { - validatePort(host, host.substring(host.indexOf(":") + 1)); + validatePort(host.substring(host.indexOf(":") + 1)); } } hosts.add(host); @@ -1227,19 +1227,17 @@ private List parseHosts(final List rawHosts) { return hosts; } - private void validatePort(final String host, final String port) { - boolean invalidPort = false; + private void validatePort(final String port) { try { int portInt = Integer.parseInt(port); if (portInt <= 0 || portInt > 65535) { - invalidPort = true; + throw new IllegalArgumentException("The connection string contains an invalid host and port. " + + "The port must be an integer between 0 and 65535."); } } catch (NumberFormatException e) { - invalidPort = true; - } - if (invalidPort) { - throw new IllegalArgumentException(format("The connection string contains an invalid host '%s'. " - + "The port '%s' is not a valid, it must be an integer between 0 and 65535", host, port)); + throw new IllegalArgumentException("The connection string contains an invalid host and port. " + + "The port contains non-digit characters, it must be an integer between 0 and 65535. 
" + + "Hint: username and password must be escaped according to RFC 3986."); } } diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java index bc905c9c6d8..539a60ea5da 100644 --- a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java @@ -19,6 +19,7 @@ import com.mongodb.connection.ServerMonitoringMode; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import java.io.UnsupportedEncodingException; @@ -27,6 +28,7 @@ import static org.junit.jupiter.api.Assertions.assertAll; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -92,4 +94,20 @@ void serverMonitoringMode() { () -> new ConnectionString(DEFAULT_OPTIONS + "serverMonitoringMode=invalid")) ); } + + + @ParameterizedTest + @ValueSource(strings = {"mongodb://foo:bar/@hostname/java?", "mongodb://foo:bar?@hostname/java/", + "mongodb+srv://foo:bar/@hostname/java?", "mongodb+srv://foo:bar?@hostname/java/", + "mongodb://foo:bar/@[::1]:27018", "mongodb://foo:bar?@[::1]:27018", + "mongodb://foo:12345678/@hostname", "mongodb+srv://foo:12345678/@hostname", + "mongodb://foo:12345678/@hostname", "mongodb+srv://foo:12345678/@hostname", + "mongodb://foo:12345678%40hostname", "mongodb+srv://foo:12345678%40hostname", + "mongodb://foo:12345678@bar@hostname", "mongodb+srv://foo:12345678@bar@hostname" + }) + void unescapedPasswordsShouldNotBeLeakedInExceptionMessages(final String input) { + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> new ConnectionString(input)); + assertFalse(exception.getMessage().contains("bar")); + assertFalse(exception.getMessage().contains("12345678")); + } } From eeeb88f1f0ea89edb3889a398820c7739cd1e9db Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Thu, 8 Aug 2024 16:03:15 +0100 Subject: [PATCH 62/90] Checkstyle fix ConnectionStringUnitTest (#1475) --- .../src/test/unit/com/mongodb/ConnectionStringUnitTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java index 539a60ea5da..0b3dd1a0814 100644 --- a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java @@ -19,7 +19,6 @@ import com.mongodb.connection.ServerMonitoringMode; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import java.io.UnsupportedEncodingException; From 5a94048bdcc0e219053b0f299f94d8cc0df235aa Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Fri, 9 Aug 2024 00:13:39 -0600 Subject: [PATCH 63/90] Fix most of the build warnings (#1476) The following commit introduced many warnings https://github.com/mongodb/mongo-java-driver/commit/01aff5a0789f71b9d0b56190d4996e8ac5436827 Many of them were then fixed in 
https://github.com/mongodb/mongo-java-driver/commit/cd297a13d1868e6a22b88529016a17fda618363d This commit fixes most of the ones that are still present --- .../ProtocolHelperSpecification.groovy | 29 +++++++++---------- .../OperationUnitSpecification.groovy | 2 +- ...ptionAwsCredentialFromEnvironmentTest.java | 8 ++--- 3 files changed, 17 insertions(+), 22 deletions(-) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy index 069ece30dbe..7e4fee2c2cf 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy @@ -33,7 +33,8 @@ import org.bson.BsonNull import org.bson.BsonString import spock.lang.Specification -import static com.mongodb.ClusterFixture.* +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException import static com.mongodb.internal.connection.ProtocolHelper.getQueryFailureException import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk @@ -73,22 +74,19 @@ class ProtocolHelperSpecification extends Specification { def 'command failure exception should be MongoExecutionTimeoutException if error code is 50'() { expect: getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)), - new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) - instanceof MongoExecutionTimeoutException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoExecutionTimeoutException } def 'command failure exception should be MongoOperationTimeoutException if error code is 50 and timeoutMS is set'() { expect: getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)), - new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) - instanceof MongoOperationTimeoutException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) instanceof MongoOperationTimeoutException } def 'query failure exception should be MongoExecutionTimeoutException if error code is 50'() { expect: getQueryFailureException(new BsonDocument('code', new BsonInt32(50)), - new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) - instanceof MongoExecutionTimeoutException + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoExecutionTimeoutException } def 'query failure exception should be MongoOperationTimeoutException if error code is 50'() { @@ -97,13 +95,12 @@ class ProtocolHelperSpecification extends Specification { new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) exception instanceof MongoOperationTimeoutException exception.getCause() instanceof MongoExecutionTimeoutException - } def 'command failure exceptions should handle MongoNotPrimaryException scenarios'() { expect: - getCommandFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) - instanceof MongoNotPrimaryException + getCommandFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNotPrimaryException where: exception << [ @@ -115,8 +112,8 @@ class ProtocolHelperSpecification extends Specification { def 
'query failure exceptions should handle MongoNotPrimaryException scenarios'() { expect: - getQueryFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) - instanceof MongoNotPrimaryException + getQueryFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNotPrimaryException where: exception << [ @@ -128,8 +125,8 @@ class ProtocolHelperSpecification extends Specification { def 'command failure exceptions should handle MongoNodeIsRecoveringException scenarios'() { expect: - getCommandFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) - instanceof MongoNodeIsRecoveringException + getCommandFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNodeIsRecoveringException where: exception << [ @@ -144,8 +141,8 @@ class ProtocolHelperSpecification extends Specification { def 'query failure exceptions should handle MongoNodeIsRecoveringException scenarios'() { expect: - getQueryFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) - instanceof MongoNodeIsRecoveringException + getQueryFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNodeIsRecoveringException where: exception << [ diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy index 11710eff7df..6305988116d 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy @@ -68,7 +68,7 @@ class OperationUnitSpecification extends Specification { [9, 0]: 25, ] - static int getMaxWireVersionForServerVersion(List serverVersion) { + static Integer getMaxWireVersionForServerVersion(List serverVersion) { def maxWireVersion = SERVER_TO_WIRE_VERSION_MAP[serverVersion.subList(0, 2)] if (maxWireVersion == null) { diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java index b5b6c7101b5..3a60a038a7d 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java @@ -230,8 +230,7 @@ void shouldThrowMongoCryptExceptionWhenNamedKMSProviderUsesEmptyOnDemandCredenti .build(); MongoCryptException e = assertThrows(MongoCryptException.class, () -> { - try (ClientEncryption ignore = createClientEncryption(settings)) {//NOP - } + createClientEncryption(settings).close(); }); assertTrue(e.getMessage().contains("On-demand credentials are not supported for named KMS providers.")); } @@ -267,10 +266,9 @@ public void shouldThrowMongoCryptExceptionWhenNamedKMSProviderUsesEmptyOnDemandC .build(); MongoCryptException e = assertThrows(MongoCryptException.class, () -> { - try (MongoClient ignore = createMongoClient(getMongoClientSettingsBuilder() + createMongoClient(getMongoClientSettingsBuilder() .autoEncryptionSettings(autoEncryptionSettings) - .build())) {//NOP - } + .build()).close(); }); assertTrue(e.getMessage().contains("On-demand credentials are not supported 
for named KMS providers.")); } From 81402ae070244dfe5ba8f70d93b00798d8dd39c7 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Tue, 13 Aug 2024 11:46:27 -0600 Subject: [PATCH 64/90] Do minor improvements to how we use field validators (#1477) Stop unnecessarily creating `NoOpFieldNameValidator`, `ReplacingDocumentFieldNameValidator` --- bson/src/main/org/bson/AbstractBsonWriter.java | 2 +- bson/src/main/org/bson/BsonBinaryWriter.java | 2 +- .../src/main/org/bson/NoOpFieldNameValidator.java | 7 ++++++- .../internal/connection/CommandHelper.java | 2 +- .../internal/connection/DefaultServerMonitor.java | 2 +- .../operation/AsyncCommandBatchCursor.java | 6 +++--- .../internal/operation/AsyncOperationHelper.java | 4 ++-- .../internal/operation/BulkWriteBatch.java | 13 ++++++------- .../internal/operation/CommandBatchCursor.java | 6 +++--- .../operation/CommandBatchCursorHelper.java | 3 --- .../operation/FindAndDeleteOperation.java | 2 +- .../operation/FindAndReplaceOperation.java | 10 ++++------ .../operation/FindAndUpdateOperation.java | 7 ++----- .../operation/MixedBulkWriteOperation.java | 6 ++---- .../internal/operation/SyncOperationHelper.java | 8 ++++---- .../internal/operation/TransactionOperation.java | 4 ++-- .../internal/session/ServerSessionPool.java | 2 +- .../validator/MappedFieldNameValidator.java | 6 +----- .../validator/NoOpFieldNameValidator.java | 7 ++++++- .../ReplacingDocumentFieldNameValidator.java | 9 ++++++--- .../validator/UpdateFieldNameValidator.java | 12 ++++++------ .../OperationFunctionalSpecification.groovy | 8 -------- .../com/mongodb/client/model/OperationTest.java | 3 --- .../connection/SingleServerClusterTest.java | 2 +- .../AsyncCommandBatchCursorFunctionalTest.java | 5 +++-- .../CommandBatchCursorFunctionalTest.java | 5 +++-- .../internal/operation/TestOperationHelper.java | 4 ++-- .../connection/CommandMessageSpecification.groovy | 4 ++-- .../internal/connection/CommandMessageTest.java | 6 ++---- .../DefaultServerConnectionSpecification.groovy | 2 +- .../connection/DefaultServerSpecification.groovy | 6 ++---- .../InternalStreamConnectionSpecification.groovy | 2 +- .../LoggingCommandEventSenderSpecification.groovy | 8 ++++---- .../internal/connection/StreamHelper.groovy | 2 +- .../UsageTrackingConnectionSpecification.groovy | 4 ++-- .../AsyncOperationHelperSpecification.groovy | 2 +- .../SyncOperationHelperSpecification.groovy | 2 +- .../ReplacingDocumentFieldNameValidatorTest.java | 15 +++++++-------- .../src/main/com/mongodb/MongoClient.java | 2 +- .../internal/CryptConnectionSpecification.groovy | 10 +++++----- 40 files changed, 98 insertions(+), 114 deletions(-) diff --git a/bson/src/main/org/bson/AbstractBsonWriter.java b/bson/src/main/org/bson/AbstractBsonWriter.java index b256c9b5545..a7cc978f8ba 100644 --- a/bson/src/main/org/bson/AbstractBsonWriter.java +++ b/bson/src/main/org/bson/AbstractBsonWriter.java @@ -47,7 +47,7 @@ public abstract class AbstractBsonWriter implements BsonWriter, Closeable { * @param settings The writer settings. 
*/ protected AbstractBsonWriter(final BsonWriterSettings settings) { - this(settings, new NoOpFieldNameValidator()); + this(settings, NoOpFieldNameValidator.INSTANCE); } /** diff --git a/bson/src/main/org/bson/BsonBinaryWriter.java b/bson/src/main/org/bson/BsonBinaryWriter.java index 95fe1e88f1f..d9301fd5cb3 100644 --- a/bson/src/main/org/bson/BsonBinaryWriter.java +++ b/bson/src/main/org/bson/BsonBinaryWriter.java @@ -67,7 +67,7 @@ public BsonBinaryWriter(final BsonOutput bsonOutput) { */ public BsonBinaryWriter(final BsonWriterSettings settings, final BsonBinaryWriterSettings binaryWriterSettings, final BsonOutput bsonOutput) { - this(settings, binaryWriterSettings, bsonOutput, new NoOpFieldNameValidator()); + this(settings, binaryWriterSettings, bsonOutput, NoOpFieldNameValidator.INSTANCE); } /** diff --git a/bson/src/main/org/bson/NoOpFieldNameValidator.java b/bson/src/main/org/bson/NoOpFieldNameValidator.java index 9d47705f574..33353498986 100644 --- a/bson/src/main/org/bson/NoOpFieldNameValidator.java +++ b/bson/src/main/org/bson/NoOpFieldNameValidator.java @@ -16,7 +16,12 @@ package org.bson; -class NoOpFieldNameValidator implements FieldNameValidator { +final class NoOpFieldNameValidator implements FieldNameValidator { + static final NoOpFieldNameValidator INSTANCE = new NoOpFieldNameValidator(); + + private NoOpFieldNameValidator() { + } + @Override public boolean validate(final String fieldName) { return true; diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java index 31737d7b22b..11dfd94e935 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java @@ -105,7 +105,7 @@ private static CommandMessage getCommandMessage(final String database, final Bso final InternalConnection internalConnection, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { - return new CommandMessage(new MongoNamespace(database, COMMAND_COLLECTION_NAME), command, new NoOpFieldNameValidator(), primary(), + return new CommandMessage(new MongoNamespace(database, COMMAND_COLLECTION_NAME), command, NoOpFieldNameValidator.INSTANCE, primary(), MessageSettings .builder() // Note: server version will be 0.0 at this point when called from InternalConnectionInitializer, diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java index 656c9bc7779..03a0309a10e 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java @@ -308,7 +308,7 @@ private boolean shouldStreamResponses(final ServerDescription currentServerDescr private CommandMessage createCommandMessage(final BsonDocument command, final InternalConnection connection, final ServerDescription currentServerDescription) { return new CommandMessage(new MongoNamespace("admin", COMMAND_COLLECTION_NAME), command, - new NoOpFieldNameValidator(), primary(), + NoOpFieldNameValidator.INSTANCE, primary(), MessageSettings.builder() .maxWireVersion(connection.getDescription().getMaxWireVersion()) .build(), diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java index 
eec8721fbf1..56ca59d14ad 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java @@ -37,6 +37,7 @@ import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.AsyncOperationHelper.AsyncCallableConnectionWithCallback; +import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonTimestamp; @@ -53,7 +54,6 @@ import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; import static com.mongodb.internal.operation.CommandBatchCursorHelper.NEXT_BATCH; -import static com.mongodb.internal.operation.CommandBatchCursorHelper.NO_OP_FIELD_NAME_VALIDATOR; import static com.mongodb.internal.operation.CommandBatchCursorHelper.getKillCursorsCommand; import static com.mongodb.internal.operation.CommandBatchCursorHelper.getMoreCommandDocument; import static com.mongodb.internal.operation.CommandBatchCursorHelper.logCommandCursorResult; @@ -177,7 +177,7 @@ private void getMoreLoop(final AsyncConnection connection, final ServerCursor se final SingleResultCallback> callback) { connection.commandAsync(namespace.getDatabaseName(), getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), CommandResultDocumentCodec.create(decoder, NEXT_BATCH), assertNotNull(resourceManager.getConnectionSource()).getOperationContext(), (commandResult, t) -> { @@ -334,7 +334,7 @@ private void killServerCursor(final MongoNamespace namespace, final ServerCursor timeoutContext.resetToDefaultMaxTime(); localConnection.commandAsync(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, (r, t) -> callback.onResult(null, null)); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java index 35782219545..b3781fc66ff 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -225,7 +225,7 @@ static void executeCommandAsync(final AsyncWriteBinding binding, Assertions.notNull("binding", binding); SingleResultCallback addingRetryableLabelCallback = addingRetryableLabelCallback(callback, connection.getDescription().getMaxWireVersion()); - connection.commandAsync(database, command, new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), + connection.commandAsync(database, command, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), binding.getOperationContext(), transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); } @@ -306,7 +306,7 @@ static void createReadCommandAndExecuteAsync( callback.onResult(null, e); return; } - connection.commandAsync(database, command, new NoOpFieldNameValidator(), source.getReadPreference(), decoder, + 
connection.commandAsync(database, command, NoOpFieldNameValidator.INSTANCE, source.getReadPreference(), decoder, operationContext, transformingReadCallback(transformer, source, connection, callback)); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java index f1551da3b2d..b5d36934605 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java @@ -54,7 +54,6 @@ import org.bson.codecs.configuration.CodecRegistry; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -69,6 +68,7 @@ import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; import static com.mongodb.internal.operation.WriteConcernHelper.createWriteConcernError; +import static java.util.Collections.singletonMap; import static org.bson.codecs.configuration.CodecRegistries.fromProviders; /** @@ -77,7 +77,6 @@ public final class BulkWriteBatch { private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); private static final Decoder DECODER = REGISTRY.get(BsonDocument.class); - private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); private final MongoNamespace namespace; private final ConnectionDescription connectionDescription; @@ -279,15 +278,15 @@ BulkWriteBatch getNextBatch() { FieldNameValidator getFieldNameValidator() { if (batchType == UPDATE || batchType == REPLACE) { - Map rootMap = new HashMap<>(); + Map rootMap; if (batchType == REPLACE) { - rootMap.put("u", new ReplacingDocumentFieldNameValidator()); + rootMap = singletonMap("u", ReplacingDocumentFieldNameValidator.INSTANCE); } else { - rootMap.put("u", new UpdateFieldNameValidator()); + rootMap = singletonMap("u", new UpdateFieldNameValidator()); } - return new MappedFieldNameValidator(NO_OP_FIELD_NAME_VALIDATOR, rootMap); + return new MappedFieldNameValidator(NoOpFieldNameValidator.INSTANCE, rootMap); } else { - return NO_OP_FIELD_NAME_VALIDATOR; + return NoOpFieldNameValidator.INSTANCE; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java index 410098db2c0..3ac893d3178 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java @@ -33,6 +33,7 @@ import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonTimestamp; @@ -52,7 +53,6 @@ import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_ITERATOR; import static com.mongodb.internal.operation.CommandBatchCursorHelper.NEXT_BATCH; -import static com.mongodb.internal.operation.CommandBatchCursorHelper.NO_OP_FIELD_NAME_VALIDATOR; import static com.mongodb.internal.operation.CommandBatchCursorHelper.getKillCursorsCommand; import static 
com.mongodb.internal.operation.CommandBatchCursorHelper.getMoreCommandDocument; import static com.mongodb.internal.operation.CommandBatchCursorHelper.logCommandCursorResult; @@ -237,7 +237,7 @@ private void getMore() { assertNotNull( connection.command(namespace.getDatabaseName(), getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment), - NO_OP_FIELD_NAME_VALIDATOR, + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), CommandResultDocumentCodec.create(decoder, NEXT_BATCH), assertNotNull(resourceManager.getConnectionSource()).getOperationContext()))); @@ -374,7 +374,7 @@ private void killServerCursor(final MongoNamespace namespace, final ServerCursor timeoutContext.resetToDefaultMaxTime(); localConnection.command(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), operationContext); + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext); } } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java index cd7d2468e7f..2a6e3b061ee 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java @@ -22,7 +22,6 @@ import com.mongodb.MongoQueryException; import com.mongodb.ServerCursor; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; @@ -30,7 +29,6 @@ import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.BsonValue; -import org.bson.FieldNameValidator; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.OperationHelper.LOGGER; @@ -42,7 +40,6 @@ final class CommandBatchCursorHelper { static final String FIRST_BATCH = "firstBatch"; static final String NEXT_BATCH = "nextBatch"; - static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); static final String MESSAGE_IF_CLOSED_AS_CURSOR = "Cursor has been closed"; static final String MESSAGE_IF_CLOSED_AS_ITERATOR = "Iterator has been closed"; diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java index c284b942fe4..373b17949dc 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java @@ -89,7 +89,7 @@ public FindAndDeleteOperation let(@Nullable final BsonDocument variables) { } protected FieldNameValidator getFieldNameValidator() { - return new NoOpFieldNameValidator(); + return NoOpFieldNameValidator.INSTANCE; } protected void specializeCommand(final BsonDocument commandDocument, final ConnectionDescription connectionDescription) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java index 3c143fdde36..59362cc667d 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java +++ 
b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java @@ -30,11 +30,9 @@ import org.bson.FieldNameValidator; import org.bson.codecs.Decoder; -import java.util.HashMap; -import java.util.Map; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static java.util.Collections.singletonMap; /** * An operation that atomically finds and replaces a single document. @@ -133,9 +131,9 @@ public FindAndReplaceOperation let(@Nullable final BsonDocument variables) { } protected FieldNameValidator getFieldNameValidator() { - Map map = new HashMap<>(); - map.put("update", new ReplacingDocumentFieldNameValidator()); - return new MappedFieldNameValidator(new NoOpFieldNameValidator(), map); + return new MappedFieldNameValidator( + NoOpFieldNameValidator.INSTANCE, + singletonMap("update", ReplacingDocumentFieldNameValidator.INSTANCE)); } protected void specializeCommand(final BsonDocument commandDocument, final ConnectionDescription connectionDescription) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java index 46e1994985c..bba62d62628 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java @@ -31,13 +31,12 @@ import org.bson.FieldNameValidator; import org.bson.codecs.Decoder; -import java.util.HashMap; import java.util.List; -import java.util.Map; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static java.util.Collections.singletonMap; /** * An operation that atomically finds and updates a single document. @@ -161,9 +160,7 @@ public FindAndUpdateOperation let(@Nullable final BsonDocument variables) { } protected FieldNameValidator getFieldNameValidator() { - Map map = new HashMap<>(); - map.put("update", new UpdateFieldNameValidator()); - return new MappedFieldNameValidator(new NoOpFieldNameValidator(), map); + return new MappedFieldNameValidator(NoOpFieldNameValidator.INSTANCE, singletonMap("update", new UpdateFieldNameValidator())); } protected void specializeCommand(final BsonDocument commandDocument, final ConnectionDescription connectionDescription) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index c506bbda2fe..398925511e0 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -47,7 +47,6 @@ import org.bson.BsonDocument; import org.bson.BsonString; import org.bson.BsonValue; -import org.bson.FieldNameValidator; import java.util.List; import java.util.Optional; @@ -77,7 +76,6 @@ *

This class is not part of the public API and may be removed or changed at any time
      */ public class MixedBulkWriteOperation implements AsyncWriteOperation, WriteOperation { - private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); private final MongoNamespace namespace; private final List writeRequests; private final boolean ordered; @@ -408,14 +406,14 @@ private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetrySta @Nullable private BsonDocument executeCommand(final OperationContext operationContext, final Connection connection, final BulkWriteBatch batch) { - return connection.command(namespace.getDatabaseName(), batch.getCommand(), NO_OP_FIELD_NAME_VALIDATOR, null, batch.getDecoder(), + return connection.command(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()), batch.getPayload(), batch.getFieldNameValidator()); } private void executeCommandAsync(final OperationContext operationContext, final AsyncConnection connection, final BulkWriteBatch batch, final SingleResultCallback callback) { - connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NO_OP_FIELD_NAME_VALIDATOR, null, batch.getDecoder(), + connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()), batch.getPayload(), batch.getFieldNameValidator(), callback); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java index 43334109c20..62da7cde2c8 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -210,7 +210,7 @@ static T executeCommand(final WriteBinding binding, final String database, f commandCreator.create(binding.getOperationContext(), source.getServerDescription(), connection.getDescription()), - new NoOpFieldNameValidator(), primary(), BSON_DOCUMENT_CODEC, binding.getOperationContext())), + NoOpFieldNameValidator.INSTANCE, primary(), BSON_DOCUMENT_CODEC, binding.getOperationContext())), connection)); } @@ -219,7 +219,7 @@ static T executeCommand(final WriteBinding binding, final String database final Decoder decoder, final CommandWriteTransformer transformer) { return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) -> transformer.apply(assertNotNull( - connection.command(database, command, new NoOpFieldNameValidator(), primary(), decoder, + connection.command(database, command, NoOpFieldNameValidator.INSTANCE, primary(), decoder, binding.getOperationContext())), connection)); } @@ -228,7 +228,7 @@ static T executeCommand(final WriteBinding binding, final String database, f final Connection connection, final CommandWriteTransformer transformer) { notNull("binding", binding); return transformer.apply(assertNotNull( - connection.command(database, command, new NoOpFieldNameValidator(), primary(), BSON_DOCUMENT_CODEC, + connection.command(database, command, NoOpFieldNameValidator.INSTANCE, primary(), BSON_DOCUMENT_CODEC, binding.getOperationContext())), connection); } @@ -295,7 +295,7 @@ static T createReadCommandAndExecute( BsonDocument command = commandCreator.create(operationContext, source.getServerDescription(), connection.getDescription()); 
retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); - return transformer.apply(assertNotNull(connection.command(database, command, new NoOpFieldNameValidator(), + return transformer.apply(assertNotNull(connection.command(database, command, NoOpFieldNameValidator.INSTANCE, source.getReadPreference(), decoder, operationContext)), source, connection); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java index 3bb04efa8ed..8bf7ee76d25 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java @@ -57,7 +57,7 @@ public WriteConcern getWriteConcern() { public Void execute(final WriteBinding binding) { isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); - return executeRetryableWrite(binding, "admin", null, new NoOpFieldNameValidator(), + return executeRetryableWrite(binding, "admin", null, NoOpFieldNameValidator.INSTANCE, new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformer(timeoutContext), getRetryCommandModifier(timeoutContext)); } @@ -66,7 +66,7 @@ public Void execute(final WriteBinding binding) { public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); - executeRetryableWriteAsync(binding, "admin", null, new NoOpFieldNameValidator(), + executeRetryableWriteAsync(binding, "admin", null, NoOpFieldNameValidator.INSTANCE, new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformerAsync(timeoutContext), getRetryCommandModifier(timeoutContext), errorHandlingCallback(callback, LOGGER)); diff --git a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java index 6f118f0eddb..9111eaed3a9 100644 --- a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java +++ b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java @@ -156,7 +156,7 @@ public String toString() { operationContext).getServer().getConnection(operationContext); connection.command("admin", - new BsonDocument("endSessions", new BsonArray(identifiers)), new NoOpFieldNameValidator(), + new BsonDocument("endSessions", new BsonArray(identifiers)), NoOpFieldNameValidator.INSTANCE, ReadPreference.primaryPreferred(), new BsonDocumentCodec(), operationContext); } catch (MongoException e) { // ignore exceptions diff --git a/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java index a3f5bd4bdbf..3e7956f06ed 100644 --- a/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java +++ b/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java @@ -55,10 +55,6 @@ public String getValidationErrorMessage(final String fieldName) { @Override public FieldNameValidator getValidatorForField(final String fieldName) { - if (fieldNameToValidatorMap.containsKey(fieldName)) { - return fieldNameToValidatorMap.get(fieldName); - } 
else { - return defaultValidator; - } + return fieldNameToValidatorMap.getOrDefault(fieldName, defaultValidator); } } diff --git a/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java index c7210085f6f..160406aedaf 100644 --- a/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java +++ b/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java @@ -23,7 +23,12 @@ * *

This class is not part of the public API and may be removed or changed at any time
      */ -public class NoOpFieldNameValidator implements FieldNameValidator { +public final class NoOpFieldNameValidator implements FieldNameValidator { + public static final NoOpFieldNameValidator INSTANCE = new NoOpFieldNameValidator(); + + private NoOpFieldNameValidator() { + } + @Override public boolean validate(final String fieldName) { return true; diff --git a/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java index 9086f5dca1b..d6d815a529f 100644 --- a/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java +++ b/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java @@ -30,11 +30,14 @@ * *

This class is not part of the public API and may be removed or changed at any time
      */ -public class ReplacingDocumentFieldNameValidator implements FieldNameValidator { - private static final NoOpFieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); +public final class ReplacingDocumentFieldNameValidator implements FieldNameValidator { + public static final ReplacingDocumentFieldNameValidator INSTANCE = new ReplacingDocumentFieldNameValidator(); // Have to support DBRef fields private static final List EXCEPTIONS = Arrays.asList("$db", "$ref", "$id"); + private ReplacingDocumentFieldNameValidator() { + } + @Override public boolean validate(final String fieldName) { return !fieldName.startsWith("$") || EXCEPTIONS.contains(fieldName); @@ -49,6 +52,6 @@ public String getValidationErrorMessage(final String fieldName) { @Override public FieldNameValidator getValidatorForField(final String fieldName) { // Only top-level fields are validated - return NO_OP_FIELD_NAME_VALIDATOR; + return NoOpFieldNameValidator.INSTANCE; } } diff --git a/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java index 821ee2eeebf..40762bfb5fb 100644 --- a/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java +++ b/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java @@ -26,12 +26,12 @@ * *

This class is not part of the public API and may be removed or changed at any time
      */ -public class UpdateFieldNameValidator implements org.bson.FieldNameValidator { - private int numFields = 0; +public final class UpdateFieldNameValidator implements org.bson.FieldNameValidator { + private boolean encounteredField = false; @Override public boolean validate(final String fieldName) { - numFields++; + encounteredField = true; return fieldName.startsWith("$"); } @@ -43,17 +43,17 @@ public String getValidationErrorMessage(final String fieldName) { @Override public FieldNameValidator getValidatorForField(final String fieldName) { - return new NoOpFieldNameValidator(); + return NoOpFieldNameValidator.INSTANCE; } @Override public void start() { - numFields = 0; + encounteredField = false; } @Override public void end() { - if (numFields == 0) { + if (!encounteredField) { throw new IllegalArgumentException("Invalid BSON document for an update. The document may not be empty."); } } diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy index adf707b9cb7..9fc12eddd93 100644 --- a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy @@ -53,10 +53,8 @@ import com.mongodb.internal.operation.MixedBulkWriteOperation import com.mongodb.internal.operation.ReadOperation import com.mongodb.internal.operation.WriteOperation import com.mongodb.internal.session.SessionContext -import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonDocument import org.bson.Document -import org.bson.FieldNameValidator import org.bson.codecs.DocumentCodec import spock.lang.Shared import spock.lang.Specification @@ -536,10 +534,4 @@ class OperationFunctionalSpecification extends Specification { .locale('en') .collationStrength(CollationStrength.SECONDARY) .build() - - static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator() - - static boolean serverVersionIsGreaterThan(List actualVersion, List minVersion) { - new ServerVersion(actualVersion).compareTo(new ServerVersion(minVersion)) >= 0 - } } diff --git a/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java b/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java index 5aaac1f70bb..aa4e5cbdf23 100644 --- a/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java +++ b/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java @@ -21,14 +21,12 @@ import com.mongodb.async.FutureResultCallback; import com.mongodb.client.test.CollectionHelper; import com.mongodb.internal.connection.ServerHelper; -import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonDouble; import org.bson.BsonValue; import org.bson.Document; -import org.bson.FieldNameValidator; import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.DecoderContext; import org.bson.codecs.DocumentCodec; @@ -61,7 +59,6 @@ public abstract class OperationTest { protected static final DocumentCodec DOCUMENT_DECODER = new DocumentCodec(); - protected static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); @BeforeEach public void beforeEach() { diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java 
b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java index ae7166300e8..d66bcff46e3 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java @@ -110,7 +110,7 @@ public void shouldSuccessfullyQueryASecondaryWithPrimaryReadPreference() { // when BsonDocument result = connection.command(getDefaultDatabaseName(), new BsonDocument("count", new BsonString(collectionName)), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), operationContext); + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext); // then assertEquals(new BsonDouble(1.0).intValue(), result.getNumber("ok").intValue()); diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java index 93449a6558b..a272f8b0f67 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java @@ -26,6 +26,7 @@ import com.mongodb.client.model.OperationTest; import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.validator.NoOpFieldNameValidator; import org.bson.BsonArray; import org.bson.BsonBoolean; import org.bson.BsonDocument; @@ -349,7 +350,7 @@ void shouldThrowCursorNotFoundException() throws Throwable { this.block(cb -> localConnection.commandAsync(getNamespace().getDatabaseName(), new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext(), cb)); + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext(), cb)); localConnection.release(); cursorNext(); @@ -414,7 +415,7 @@ private BsonDocument executeFindCommand(final BsonDocument filter, final int lim } BsonDocument results = block(cb -> connection.commandAsync(getDatabaseName(), findCommand, - NO_OP_FIELD_NAME_VALIDATOR, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), + NoOpFieldNameValidator.INSTANCE, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), connectionSource.getOperationContext(), cb)); assertNotNull(results); diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java index 7b9fd7b4e57..57caf3bdbfc 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java @@ -25,6 +25,7 @@ import com.mongodb.client.model.OperationTest; import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.validator.NoOpFieldNameValidator; import org.bson.BsonArray; import 
org.bson.BsonBoolean; import org.bson.BsonDocument; @@ -439,7 +440,7 @@ void shouldThrowCursorNotFoundException() { localConnection.command(getNamespace().getDatabaseName(), new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext()); + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext()); localConnection.release(); cursor.next(); @@ -529,7 +530,7 @@ private BsonDocument executeFindCommand(final BsonDocument filter, final int lim } BsonDocument results = connection.command(getDatabaseName(), findCommand, - NO_OP_FIELD_NAME_VALIDATOR, readPreference, + NoOpFieldNameValidator.INSTANCE, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), connectionSource.getOperationContext()); diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java index 0eeeff8bdb4..824517e10db 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java @@ -56,7 +56,7 @@ static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final Serv connection.command(namespace.getDatabaseName(), new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) .append("collection", new BsonString(namespace.getCollectionName())), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT)); + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT)); } static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor, @@ -66,7 +66,7 @@ static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final Serv connection.commandAsync(namespace.getDatabaseName(), new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) .append("collection", new BsonString(namespace.getCollectionName())), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT, callback); + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT, callback); callback.get(); }); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy index 427fe23c613..8c10755cca8 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy @@ -51,7 +51,7 @@ class CommandMessageSpecification extends Specification { def namespace = new MongoNamespace('db.test') def command = new BsonDocument('find', new BsonString(namespace.collectionName)) - def fieldNameValidator = new NoOpFieldNameValidator() + def fieldNameValidator = NoOpFieldNameValidator.INSTANCE def 'should encode command message with OP_MSG when server version is >= 3.6'() { given: @@ -149,7 +149,7 @@ class CommandMessageSpecification extends Specification { def 'should get command document'() { given: def message = new 
CommandMessage(namespace, originalCommandDocument, fieldNameValidator, ReadPreference.primary(), - MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, payload, new NoOpFieldNameValidator(), + MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, payload, NoOpFieldNameValidator.INSTANCE, ClusterConnectionMode.MULTIPLE, null) def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java index f08086be5e8..4735811f025 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java @@ -28,7 +28,6 @@ import org.bson.BsonDocument; import org.bson.BsonString; import org.bson.BsonTimestamp; -import org.bson.FieldNameValidator; import org.bson.io.BasicOutputBuffer; import org.junit.jupiter.api.Test; @@ -44,12 +43,11 @@ class CommandMessageTest { private static final MongoNamespace NAMESPACE = new MongoNamespace("db.test"); private static final BsonDocument COMMAND = new BsonDocument("find", new BsonString(NAMESPACE.getCollectionName())); - private static final FieldNameValidator FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); @Test void encodeShouldThrowTimeoutExceptionWhenTimeoutContextIsCalled() { //given - CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, FIELD_NAME_VALIDATOR, ReadPreference.primary(), + CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), MessageSettings.builder() .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION) .serverType(ServerType.REPLICA_SET_SECONDARY) @@ -75,7 +73,7 @@ void encodeShouldThrowTimeoutExceptionWhenTimeoutContextIsCalled() { @Test void encodeShouldNotAddExtraElementsFromTimeoutContextWhenConnectedToMongoCrypt() { //given - CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, FIELD_NAME_VALIDATOR, ReadPreference.primary(), + CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), MessageSettings.builder() .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION) .serverType(ServerType.REPLICA_SET_SECONDARY) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy index 5b894c7a735..282c4dbb868 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy @@ -39,7 +39,7 @@ class DefaultServerConnectionSpecification extends Specification { def 'should execute command protocol asynchronously'() { given: def command = new BsonDocument(LEGACY_HELLO_LOWER, new BsonInt32(1)) - def validator = new NoOpFieldNameValidator() + def validator = NoOpFieldNameValidator.INSTANCE def codec = new BsonDocumentCodec() def executor = Mock(ProtocolExecutor) def connection = new DefaultServerConnection(internalConnection, executor, ClusterConnectionMode.MULTIPLE) diff --git 
a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy index f054457b877..21f03260818 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy @@ -44,7 +44,6 @@ import com.mongodb.internal.time.Timeout import com.mongodb.internal.validator.NoOpFieldNameValidator import org.bson.BsonDocument import org.bson.BsonInt32 -import org.bson.FieldNameValidator import org.bson.codecs.BsonDocumentCodec import spock.lang.Specification @@ -56,7 +55,6 @@ import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE import static com.mongodb.connection.ClusterConnectionMode.SINGLE class DefaultServerSpecification extends Specification { - private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator() def serverId = new ServerId(new ClusterId(), new ServerAddress()) def 'should get a connection'() { @@ -311,13 +309,13 @@ class DefaultServerSpecification extends Specification { when: if (async) { CountDownLatch latch = new CountDownLatch(1) - testConnection.commandAsync('admin', new BsonDocument('ping', new BsonInt32(1)), NO_OP_FIELD_NAME_VALIDATOR, + testConnection.commandAsync('admin', new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext) { BsonDocument result, Throwable t -> latch.countDown() } latch.await() } else { - testConnection.command('admin', new BsonDocument('ping', new BsonInt32(1)), NO_OP_FIELD_NAME_VALIDATOR, + testConnection.command('admin', new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext) } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy index 023b8f60079..5456bddb654 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy @@ -78,7 +78,7 @@ class InternalStreamConnectionSpecification extends Specification { private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress()) def cmdNamespace = new MongoNamespace('admin.$cmd') - def fieldNameValidator = new NoOpFieldNameValidator() + def fieldNameValidator = NoOpFieldNameValidator.INSTANCE def helper = new StreamHelper() def serverAddress = new ServerAddress() def connectionId = new ConnectionId(SERVER_ID, 1, 1) diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy index b317f3dd0ba..6f8eaf33314 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy @@ -57,7 +57,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def replyDocument = new BsonDocument('ok', new BsonInt32(1)) def failureException = new 
MongoInternalException('failure!') def message = new CommandMessage(namespace, commandDocument, - new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, MULTIPLE, null) + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), messageSettings, MULTIPLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, Stub(TimeoutContext), null)) @@ -101,7 +101,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def commandDocument = new BsonDocument('ping', new BsonInt32(1)) def replyDocument = new BsonDocument('ok', new BsonInt32(42)) def failureException = new MongoInternalException('failure!') - def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), + def message = new CommandMessage(namespace, commandDocument, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), messageSettings, MULTIPLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, @@ -158,7 +158,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def namespace = new MongoNamespace('test.driver') def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def commandDocument = new BsonDocument('fake', new BsonBinary(new byte[2048])) - def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), + def message = new CommandMessage(namespace, commandDocument, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), messageSettings, SINGLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, @@ -192,7 +192,7 @@ class LoggingCommandEventSenderSpecification extends Specification { def namespace = new MongoNamespace('test.driver') def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() def commandDocument = new BsonDocument('createUser', new BsonString('private')) - def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(), + def message = new CommandMessage(namespace, commandDocument, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), messageSettings, SINGLE, null) def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy index 855951d425a..251ce1a79fb 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy @@ -168,7 +168,7 @@ class StreamHelper { static hello() { CommandMessage command = new CommandMessage(new MongoNamespace('admin', COMMAND_COLLECTION_NAME), - new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new NoOpFieldNameValidator(), ReadPreference.primary(), + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), MessageSettings.builder().build(), SINGLE, null) 
OutputBuffer outputBuffer = new BasicOutputBuffer() command.encode(outputBuffer, new OperationContext( diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy index d2e5414bd56..71a4b6eec79 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy @@ -172,7 +172,7 @@ class UsageTrackingConnectionSpecification extends Specification { when: connection.sendAndReceive(new CommandMessage(new MongoNamespace('test.coll'), - new BsonDocument('ping', new BsonInt32(1)), new NoOpFieldNameValidator(), primary(), + new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, primary(), MessageSettings.builder().build(), SINGLE, null), new BsonDocumentCodec(), OPERATION_CONTEXT) then: @@ -189,7 +189,7 @@ class UsageTrackingConnectionSpecification extends Specification { when: connection.sendAndReceiveAsync(new CommandMessage(new MongoNamespace('test.coll'), - new BsonDocument('ping', new BsonInt32(1)), new NoOpFieldNameValidator(), primary(), + new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, primary(), MessageSettings.builder().build(), SINGLE, null), new BsonDocumentCodec(), OPERATION_CONTEXT, futureResultCallback) futureResultCallback.get() diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy index 2e99b61efdf..ba69097cffa 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy @@ -91,7 +91,7 @@ class AsyncOperationHelperSpecification extends Specification { when: executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(), - new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.asyncTransformer(), + NoOpFieldNameValidator.INSTANCE, decoder, commandCreator, FindAndModifyHelper.asyncTransformer(), { cmd -> cmd }, callback) then: diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy index ab6b6e252ab..df2d54bfb9d 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy @@ -107,7 +107,7 @@ class SyncOperationHelperSpecification extends Specification { when: executeRetryableWrite(writeBinding, dbName, primary(), - new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.transformer()) + NoOpFieldNameValidator.INSTANCE, decoder, commandCreator, FindAndModifyHelper.transformer()) { cmd -> cmd } then: diff --git a/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java b/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java index 67d89969fdd..ff7ef713653 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java +++ 
b/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java @@ -18,28 +18,27 @@ import org.junit.Test; +import static com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator.INSTANCE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; public class ReplacingDocumentFieldNameValidatorTest { - private final ReplacingDocumentFieldNameValidator fieldNameValidator = new ReplacingDocumentFieldNameValidator(); - @Test public void testFieldValidationSuccess() { - assertTrue(fieldNameValidator.validate("ok")); + assertTrue(INSTANCE.validate("ok")); } @Test public void testFieldNameStartsWithDollarValidation() { - assertFalse(fieldNameValidator.validate("$1")); - assertTrue(fieldNameValidator.validate("$db")); - assertTrue(fieldNameValidator.validate("$ref")); - assertTrue(fieldNameValidator.validate("$id")); + assertFalse(INSTANCE.validate("$1")); + assertTrue(INSTANCE.validate("$db")); + assertTrue(INSTANCE.validate("$ref")); + assertTrue(INSTANCE.validate("$id")); } @Test public void testNestedDocumentsAreNotValidated() { - assertEquals(NoOpFieldNameValidator.class, fieldNameValidator.getValidatorForField("nested").getClass()); + assertEquals(NoOpFieldNameValidator.class, INSTANCE.getValidatorForField("nested").getClass()); } } diff --git a/driver-legacy/src/main/com/mongodb/MongoClient.java b/driver-legacy/src/main/com/mongodb/MongoClient.java index 1e3f0a00c2b..1d888988aca 100644 --- a/driver-legacy/src/main/com/mongodb/MongoClient.java +++ b/driver-legacy/src/main/com/mongodb/MongoClient.java @@ -851,7 +851,7 @@ private void cleanCursors() { try { BsonDocument killCursorsCommand = new BsonDocument("killCursors", new BsonString(cur.namespace.getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(cur.serverCursor.getId())))); - connection.command(cur.namespace.getDatabaseName(), killCursorsCommand, new NoOpFieldNameValidator(), + connection.command(cur.namespace.getDatabaseName(), killCursorsCommand, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), source.getOperationContext()); } finally { connection.release(); diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy index 18a13195d00..eb7d51622a3 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy @@ -84,7 +84,7 @@ class CryptConnectionSpecification extends Specification { def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('find', 'test') .append('filter', new Document('ssid', '555-55-5555')), codec), - new NoOpFieldNameValidator(), ReadPreference.primary(), codec, operationContext) + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), codec, operationContext) then: _ * wrappedConnection.getDescription() >> { @@ -133,8 +133,8 @@ class CryptConnectionSpecification extends Specification { when: def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('insert', 'test'), codec), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), - operationContext, true, payload, new NoOpFieldNameValidator(),) + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), + 
operationContext, true, payload, NoOpFieldNameValidator.INSTANCE,) then: _ * wrappedConnection.getDescription() >> { @@ -190,8 +190,8 @@ class CryptConnectionSpecification extends Specification { when: def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('insert', 'test'), codec), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload, - new NoOpFieldNameValidator()) + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload, + NoOpFieldNameValidator.INSTANCE) then: _ * wrappedConnection.getDescription() >> { From d8f91fb15905b8ee53c696202c1ec071c3eab40c Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Thu, 15 Aug 2024 16:14:32 -0600 Subject: [PATCH 65/90] Refactor `MixedBulkWriteOperation` a bit and change `CommandMessage.isResponseExpected` such that it accounts for ordered/unordered bulk writes (#1481) --- .../internal/connection/CommandMessage.java | 18 +++-- .../connection/SplittablePayload.java | 19 +++-- .../internal/operation/BulkWriteBatch.java | 16 ++--- .../operation/MixedBulkWriteOperation.java | 71 +++++++++++++------ .../CommandMessageSpecification.groovy | 10 +-- .../MongoClientSessionSpecification.groovy | 3 + .../CryptConnectionSpecification.groovy | 4 +- 7 files changed, 83 insertions(+), 58 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java index c5cd3491ec8..bac2a86e61d 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java @@ -45,6 +45,7 @@ import static com.mongodb.ReadPreference.primary; import static com.mongodb.ReadPreference.primaryPreferred; import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.connection.ClusterConnectionMode.LOAD_BALANCED; import static com.mongodb.connection.ClusterConnectionMode.SINGLE; @@ -112,6 +113,7 @@ public final class CommandMessage extends RequestMessage { this.payloadFieldNameValidator = payloadFieldNameValidator; this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); this.serverApi = serverApi; + assertTrue(useOpMsg() || responseExpected); } /** @@ -187,7 +189,11 @@ private String getSequenceIdentifier(final ByteBuf byteBuf) { } boolean isResponseExpected() { - return !useOpMsg() || requireOpMsgResponse(); + if (responseExpected) { + return true; + } else { + return payload != null && payload.isOrdered() && payload.hasAnotherSplit(); + } } MongoNamespace getNamespace() { @@ -240,7 +246,7 @@ protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOu private int getOpMsgFlagBits() { int flagBits = 0; - if (!requireOpMsgResponse()) { + if (!isResponseExpected()) { flagBits = 1 << 1; } if (exhaustAllowed) { @@ -249,14 +255,6 @@ private int getOpMsgFlagBits() { return flagBits; } - private boolean requireOpMsgResponse() { - if (responseExpected) { - return true; - } else { - return payload != null && payload.hasAnotherSplit(); - } - } - private boolean isDirectConnectionToReplicaSetMember() { return clusterConnectionMode == SINGLE && getSettings().getServerType() != SHARD_ROUTER diff --git 
a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java index a71f7a940f0..8539a2074ee 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java @@ -37,6 +37,7 @@ import java.util.stream.Collectors; import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.connection.SplittablePayload.Type.INSERT; @@ -57,6 +58,7 @@ public final class SplittablePayload { private final WriteRequestEncoder writeRequestEncoder = new WriteRequestEncoder(); private final Type payloadType; private final List writeRequestWithIndexes; + private final boolean ordered; private final Map insertedIds = new HashMap<>(); private int position = 0; @@ -91,9 +93,10 @@ public enum Type { * @param payloadType the payload type * @param writeRequestWithIndexes the writeRequests */ - public SplittablePayload(final Type payloadType, final List writeRequestWithIndexes) { + public SplittablePayload(final Type payloadType, final List writeRequestWithIndexes, final boolean ordered) { this.payloadType = notNull("batchType", payloadType); this.writeRequestWithIndexes = notNull("writeRequests", writeRequestWithIndexes); + this.ordered = ordered; } /** @@ -117,7 +120,7 @@ public String getPayloadName() { } boolean hasPayload() { - return writeRequestWithIndexes.size() > 0; + return !writeRequestWithIndexes.isEmpty(); } public int size() { @@ -137,10 +140,6 @@ public List getPayload() { .collect(Collectors.toList()); } - public List getWriteRequestWithIndexes() { - return writeRequestWithIndexes; - } - /** * @return the current position in the payload */ @@ -160,16 +159,22 @@ public void setPosition(final int position) { * @return true if there are more values after the current position */ public boolean hasAnotherSplit() { + // this method must be not called before this payload having been encoded + assertTrue(position > 0); return writeRequestWithIndexes.size() > position; } + boolean isOrdered() { + return ordered; + } + /** * @return a new SplittablePayload containing only the values after the current position. 
*/ public SplittablePayload getNextSplit() { isTrue("hasAnotherSplit", hasAnotherSplit()); List nextPayLoad = writeRequestWithIndexes.subList(position, writeRequestWithIndexes.size()); - return new SplittablePayload(payloadType, nextPayLoad); + return new SplittablePayload(payloadType, nextPayLoad, ordered); } /** diff --git a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java index b5d36934605..1bca4734eff 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java @@ -17,7 +17,6 @@ package com.mongodb.internal.operation; import com.mongodb.MongoBulkWriteException; -import com.mongodb.MongoClientException; import com.mongodb.MongoInternalException; import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; @@ -65,6 +64,7 @@ import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE; import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.MixedBulkWriteOperation.commandWriteConcern; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; import static com.mongodb.internal.operation.WriteConcernHelper.createWriteConcernError; @@ -101,12 +101,7 @@ static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace, final List writeRequests, final OperationContext operationContext, @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { - SessionContext sessionContext = operationContext.getSessionContext(); - if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !sessionContext.hasActiveTransaction() - && !writeConcern.isAcknowledged()) { - throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session"); - } - boolean canRetryWrites = isRetryableWrite(retryWrites, writeConcern, connectionDescription, sessionContext); + boolean canRetryWrites = isRetryableWrite(retryWrites, writeConcern, connectionDescription, operationContext.getSessionContext()); List writeRequestsWithIndex = new ArrayList<>(); boolean writeRequestsAreRetryable = true; for (int i = 0; i < writeRequests.size(); i++) { @@ -159,7 +154,7 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti this.indexMap = indexMap; this.unprocessed = unprocessedItems; - this.payload = new SplittablePayload(getPayloadType(batchType), payloadItems); + this.payload = new SplittablePayload(getPayloadType(batchType), payloadItems, ordered); this.operationContext = operationContext; this.comment = comment; this.variables = variables; @@ -169,9 +164,8 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti if (!payloadItems.isEmpty()) { command.put(getCommandName(batchType), new BsonString(namespace.getCollectionName())); command.put("ordered", new BsonBoolean(ordered)); - if (!writeConcern.isServerDefault() && !sessionContext.hasActiveTransaction()) { - command.put("writeConcern", writeConcern.asDocument()); - } + commandWriteConcern(writeConcern, sessionContext).ifPresent(value -> + command.put("writeConcern", value.asDocument())); if (bypassDocumentValidation != null) { command.put("bypassDocumentValidation", new BsonBoolean(bypassDocumentValidation)); } diff --git 
a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index 398925511e0..a32ce6d5153 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -16,6 +16,7 @@ package com.mongodb.internal.operation; +import com.mongodb.MongoClientException; import com.mongodb.MongoException; import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; @@ -191,8 +192,8 @@ public BulkWriteResult execute(final WriteBinding binding) { // attach `maxWireVersion` ASAP because it is used to check whether we can retry retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true); SessionContext sessionContext = binding.getOperationContext().getSessionContext(); - WriteConcern writeConcern = getAppliedWriteConcern(sessionContext); - if (!isRetryableWrite(retryWrites, getAppliedWriteConcern(sessionContext), connectionDescription, sessionContext)) { + WriteConcern writeConcern = validateAndGetEffectiveWriteConcern(this.writeConcern, sessionContext); + if (!isRetryableWrite(retryWrites, writeConcern, connectionDescription, sessionContext)) { handleMongoWriteConcernWithResponseException(retryState, true, timeoutContext); } validateWriteRequests(connectionDescription, bypassDocumentValidation, writeRequests, writeConcern); @@ -201,7 +202,7 @@ public BulkWriteResult execute(final WriteBinding binding) { connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext); } - return executeBulkWriteBatch(retryState, binding, connection); + return executeBulkWriteBatch(retryState, writeConcern, binding, connection); }) ); try { @@ -226,8 +227,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall // attach `maxWireVersion` ASAP because it is used to check whether we can retry retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true); SessionContext sessionContext = binding.getOperationContext().getSessionContext(); - WriteConcern writeConcern = getAppliedWriteConcern(sessionContext); - if (!isRetryableWrite(retryWrites, getAppliedWriteConcern(sessionContext), connectionDescription, sessionContext) + WriteConcern writeConcern = validateAndGetEffectiveWriteConcern(this.writeConcern, sessionContext); + if (!isRetryableWrite(retryWrites, writeConcern, connectionDescription, sessionContext) && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallback, timeoutContext)) { return; } @@ -245,13 +246,17 @@ && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallba releasingCallback.onResult(null, t); return; } - executeBulkWriteBatchAsync(retryState, binding, connection, releasingCallback); + executeBulkWriteBatchAsync(retryState, writeConcern, binding, connection, releasingCallback); }) ).whenComplete(binding::release); retryingBulkWrite.get(exceptionTransformingCallback(errorHandlingCallback(callback, LOGGER))); } - private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final WriteBinding binding, final Connection connection) { + private BulkWriteResult executeBulkWriteBatch( + final RetryState retryState, + final WriteConcern effectiveWriteConcern, + final WriteBinding binding, + final Connection connection) { 
BulkWriteTracker currentBulkWriteTracker = retryState.attachment(AttachmentKeys.bulkWriteTracker()) .orElseThrow(Assertions::fail); BulkWriteBatch currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail); @@ -261,7 +266,7 @@ private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final while (currentBatch.shouldProcessBatch()) { try { - BsonDocument result = executeCommand(operationContext, connection, currentBatch); + BsonDocument result = executeCommand(effectiveWriteConcern, operationContext, connection, currentBatch); if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) { MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result, connection.getDescription().getServerAddress(), "errMsg", timeoutContext); @@ -295,7 +300,11 @@ private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final } } - private void executeBulkWriteBatchAsync(final RetryState retryState, final AsyncWriteBinding binding, final AsyncConnection connection, + private void executeBulkWriteBatchAsync( + final RetryState retryState, + final WriteConcern effectiveWriteConcern, + final AsyncWriteBinding binding, + final AsyncConnection connection, final SingleResultCallback callback) { LoopState loopState = new LoopState(); AsyncCallbackRunnable loop = new AsyncCallbackLoop(loopState, iterationCallback -> { @@ -309,7 +318,7 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async } OperationContext operationContext = binding.getOperationContext(); TimeoutContext timeoutContext = operationContext.getTimeoutContext(); - executeCommandAsync(operationContext, connection, currentBatch, (result, t) -> { + executeCommandAsync(effectiveWriteConcern, operationContext, connection, currentBatch, (result, t) -> { if (t == null) { if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) { MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result, @@ -405,31 +414,47 @@ private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetrySta } @Nullable - private BsonDocument executeCommand(final OperationContext operationContext, final Connection connection, final BulkWriteBatch batch) { + private BsonDocument executeCommand( + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext, + final Connection connection, + final BulkWriteBatch batch) { return connection.command(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), - operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()), + operationContext, shouldExpectResponse(batch, effectiveWriteConcern), batch.getPayload(), batch.getFieldNameValidator()); } - private void executeCommandAsync(final OperationContext operationContext, final AsyncConnection connection, final BulkWriteBatch batch, + private void executeCommandAsync( + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext, + final AsyncConnection connection, + final BulkWriteBatch batch, final SingleResultCallback callback) { connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), - operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()), + operationContext, shouldExpectResponse(batch, effectiveWriteConcern), batch.getPayload(), batch.getFieldNameValidator(), 
callback); } - private WriteConcern getAppliedWriteConcern(final SessionContext sessionContext) { - if (sessionContext.hasActiveTransaction()) { - return WriteConcern.ACKNOWLEDGED; - } else { - return writeConcern; + private static WriteConcern validateAndGetEffectiveWriteConcern(final WriteConcern writeConcernSetting, final SessionContext sessionContext) + throws MongoClientException { + boolean activeTransaction = sessionContext.hasActiveTransaction(); + WriteConcern effectiveWriteConcern = activeTransaction + ? WriteConcern.ACKNOWLEDGED + : writeConcernSetting; + if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !activeTransaction && !effectiveWriteConcern.isAcknowledged()) { + throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session"); } + return effectiveWriteConcern; } - private boolean shouldAcknowledge(final BulkWriteBatch batch, final SessionContext sessionContext) { - return ordered - ? batch.hasAnotherBatch() || getAppliedWriteConcern(sessionContext).isAcknowledged() - : getAppliedWriteConcern(sessionContext).isAcknowledged(); + static Optional commandWriteConcern(final WriteConcern effectiveWriteConcern, final SessionContext sessionContext) { + return effectiveWriteConcern.isServerDefault() || sessionContext.hasActiveTransaction() + ? Optional.empty() + : Optional.of(effectiveWriteConcern); + } + + private boolean shouldExpectResponse(final BulkWriteBatch batch, final WriteConcern effectiveWriteConcern) { + return effectiveWriteConcern.isAcknowledged() || (ordered && batch.hasAnotherBatch()); } private void addErrorLabelsToWriteConcern(final BsonDocument result, final Set errorLabels) { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy index 8c10755cca8..e8ed6c152ae 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy @@ -172,7 +172,7 @@ class CommandMessageSpecification extends Specification { new BsonDocument('insert', new BsonString('coll')), new SplittablePayload(INSERT, [new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) } ), + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true), ], [ LATEST_WIRE_VERSION, @@ -193,7 +193,7 @@ class CommandMessageSpecification extends Specification { new BsonDocument('_id', new BsonInt32(3)).append('c', new BsonBinary(new byte[450])), new BsonDocument('_id', new BsonInt32(4)).append('b', new BsonBinary(new byte[441])), new BsonDocument('_id', new BsonInt32(5)).append('c', new BsonBinary(new byte[451]))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) } ) + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) def message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) def output = new BasicOutputBuffer() @@ -280,7 +280,7 @@ class CommandMessageSpecification extends Specification { def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900])), new BsonDocument('b', 
new BsonBinary(new byte[450])), new BsonDocument('c', new BsonBinary(new byte[450]))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) } ) + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) def output = new BasicOutputBuffer() @@ -328,7 +328,7 @@ class CommandMessageSpecification extends Specification { def messageSettings = MessageSettings.builder().maxDocumentSize(900) .maxWireVersion(LATEST_WIRE_VERSION).build() def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900]))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }) + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) def output = new BasicOutputBuffer() @@ -348,7 +348,7 @@ class CommandMessageSpecification extends Specification { given: def messageSettings = MessageSettings.builder().serverType(ServerType.SHARD_ROUTER) .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION).build() - def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonInt32(1))]) + def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonInt32(1))], true) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) def output = new BasicOutputBuffer() diff --git a/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy index f5eead4cdfc..fc688fec5df 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy +++ b/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy @@ -26,6 +26,7 @@ import com.mongodb.WriteConcern import com.mongodb.client.model.Filters import com.mongodb.event.CommandStartedEvent import com.mongodb.internal.connection.TestCommandListener +import com.mongodb.internal.time.Timeout import org.bson.BsonBinarySubType import org.bson.BsonDocument import org.bson.BsonInt32 @@ -350,9 +351,11 @@ class MongoClientSessionSpecification extends FunctionalSpecification { void waitForInsertAcknowledgement(MongoCollection collection, ObjectId id) { Document document = collection.find(Filters.eq(id)).first() + Timeout timeout = Timeout.expiresIn(5, TimeUnit.SECONDS, Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE) while (document == null) { Thread.sleep(1) document = collection.find(Filters.eq(id)).first() + timeout.onExpired { assert !"Timed out waiting for insert acknowledgement".trim() } } } } diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy index eb7d51622a3..8293b6a1599 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy @@ -115,7 
+115,7 @@ class CryptConnectionSpecification extends Specification { def payload = new SplittablePayload(INSERT, [ new BsonDocumentWrapper(new Document('_id', 1).append('ssid', '555-55-5555').append('b', bytes), codec), new BsonDocumentWrapper(new Document('_id', 2).append('ssid', '666-66-6666').append('b', bytes), codec) - ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }) + ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) def encryptedCommand = toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', new BsonArray( [ new BsonDocument('_id', new BsonInt32(1)) @@ -172,7 +172,7 @@ class CryptConnectionSpecification extends Specification { new BsonDocumentWrapper(new Document('_id', 1), codec), new BsonDocumentWrapper(new Document('_id', 2), codec), new BsonDocumentWrapper(new Document('_id', 3), codec) - ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }) + ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) def encryptedCommand = toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', new BsonArray( [ new BsonDocument('_id', new BsonInt32(1)), From bc49800ba25ff8ba719be3e3c1eb4d3af82d359d Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Thu, 15 Aug 2024 16:46:21 -0600 Subject: [PATCH 66/90] MixedBulkWriteOperation should generate inserted document IDs at most once per batch (#1482) JAVA-5572 --- .../connection/IdHoldingBsonWriter.java | 20 ++++- .../connection/SplittablePayload.java | 22 +++++- .../IdHoldingBsonWriterSpecification.groovy | 31 ++++++-- .../com/mongodb/client/CrudProseTest.java | 73 +++++++++++++++++++ 4 files changed, 133 insertions(+), 13 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java index 606458b3382..4120dbdfb17 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java +++ b/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java @@ -16,6 +16,7 @@ package com.mongodb.internal.connection; +import com.mongodb.lang.Nullable; import org.bson.BsonBinary; import org.bson.BsonBinaryWriter; import org.bson.BsonBoolean; @@ -57,11 +58,17 @@ public class IdHoldingBsonWriter extends LevelCountingBsonWriter { private LevelCountingBsonWriter idBsonBinaryWriter; private BasicOutputBuffer outputBuffer; private String currentFieldName; + private final BsonValue fallbackId; private BsonValue id; private boolean idFieldIsAnArray = false; - public IdHoldingBsonWriter(final BsonWriter bsonWriter) { + /** + * @param fallbackId The "_id" field value to use if the top-level document written via this {@link BsonWriter} + * does not have "_id". If {@code null}, then a new {@link BsonObjectId} is generated instead. + */ + public IdHoldingBsonWriter(final BsonWriter bsonWriter, @Nullable final BsonObjectId fallbackId) { super(bsonWriter); + this.fallbackId = fallbackId; } @Override @@ -99,7 +106,7 @@ public void writeEndDocument() { } if (getCurrentLevel() == 0 && id == null) { - id = new BsonObjectId(); + id = fallbackId == null ? 
new BsonObjectId() : fallbackId; writeObjectId(ID_FIELD_NAME, id.asObjectId().getValue()); } super.writeEndDocument(); @@ -408,6 +415,15 @@ public void flush() { super.flush(); } + /** + * Returns either the value of the "_id" field from the top-level document written via this {@link BsonWriter}, + * provided that the document is not {@link RawBsonDocument}, + * or the generated {@link BsonObjectId}. + * If the document is {@link RawBsonDocument}, then returns {@code null}. + *

      + * {@linkplain #flush() Flushing} is not required before calling this method.
      + */ + @Nullable public BsonValue getId() { return id; } diff --git a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java index 8539a2074ee..d628a39238d 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java @@ -23,6 +23,7 @@ import com.mongodb.internal.bulk.WriteRequestWithIndex; import org.bson.BsonDocument; import org.bson.BsonDocumentWrapper; +import org.bson.BsonObjectId; import org.bson.BsonValue; import org.bson.BsonWriter; import org.bson.codecs.BsonValueCodecProvider; @@ -196,10 +197,23 @@ public void encode(final BsonWriter writer, final WriteRequestWithIndex writeReq InsertRequest insertRequest = (InsertRequest) writeRequestWithIndex.getWriteRequest(); BsonDocument document = insertRequest.getDocument(); - IdHoldingBsonWriter idHoldingBsonWriter = new IdHoldingBsonWriter(writer); - getCodec(document).encode(idHoldingBsonWriter, document, - EncoderContext.builder().isEncodingCollectibleDocument(true).build()); - insertedIds.put(writeRequestWithIndex.getIndex(), idHoldingBsonWriter.getId()); + BsonValue documentId = insertedIds.compute( + writeRequestWithIndex.getIndex(), + (writeRequestIndex, writeRequestDocumentId) -> { + IdHoldingBsonWriter idHoldingBsonWriter = new IdHoldingBsonWriter( + writer, + // Reuse `writeRequestDocumentId` if it may have been generated + // by `IdHoldingBsonWriter` in a previous attempt. + // If its type is not `BsonObjectId`, we know it could not have been generated. + writeRequestDocumentId instanceof BsonObjectId ? writeRequestDocumentId.asObjectId() : null); + getCodec(document).encode(idHoldingBsonWriter, document, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()); + return idHoldingBsonWriter.getId(); + }); + if (documentId == null) { + // we must add an entry anyway because we rely on all the indexes being present + insertedIds.put(writeRequestWithIndex.getIndex(), null); + } } else if (writeRequestWithIndex.getType() == WriteRequest.Type.UPDATE || writeRequestWithIndex.getType() == WriteRequest.Type.REPLACE) { UpdateRequest update = (UpdateRequest) writeRequestWithIndex.getWriteRequest(); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy index 451545632d4..f603576ecfb 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy @@ -32,11 +32,12 @@ import static org.bson.BsonHelper.documentWithValuesOfEveryType import static org.bson.BsonHelper.getBsonValues class IdHoldingBsonWriterSpecification extends Specification { + private static final OBJECT_ID = new BsonObjectId() def 'should write all types'() { given: def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) def document = documentWithValuesOfEveryType() when: @@ -47,18 +48,25 @@ class IdHoldingBsonWriterSpecification extends Specification { !document.containsKey('_id') encodedDocument.containsKey('_id') idTrackingBsonWriter.getId() == encodedDocument.get('_id') + if 
(expectedIdNullIfMustBeGenerated != null) { + idTrackingBsonWriter.getId() == expectedIdNullIfMustBeGenerated + } when: encodedDocument.remove('_id') then: encodedDocument == document + + where: + fallbackId << [null, OBJECT_ID] + expectedIdNullIfMustBeGenerated << [null, OBJECT_ID] } def 'should support all types for _id value'() { given: def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) def document = new BsonDocument() document.put('_id', id) @@ -71,12 +79,15 @@ class IdHoldingBsonWriterSpecification extends Specification { idTrackingBsonWriter.getId() == id where: - id << getBsonValues() + [id, fallbackId] << [ + getBsonValues(), + [null, new BsonObjectId()] + ].combinations() } def 'serialize document with list of documents that contain an _id field'() { def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) def document = new BsonDocument('_id', new BsonObjectId()) .append('items', new BsonArray(Collections.singletonList(new BsonDocument('_id', new BsonObjectId())))) @@ -86,11 +97,14 @@ class IdHoldingBsonWriterSpecification extends Specification { then: encodedDocument == document + + where: + fallbackId << [null, new BsonObjectId()] } def 'serialize _id documents containing arrays'() { def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) BsonDocument document = BsonDocument.parse(json) when: @@ -102,7 +116,8 @@ class IdHoldingBsonWriterSpecification extends Specification { encodedDocument == document where: - json << ['{"_id": {"a": []}, "b": 123}', + [json, fallbackId] << [ + ['{"_id": {"a": []}, "b": 123}', '{"_id": {"a": [1, 2]}, "b": 123}', '{"_id": {"a": [[[[1]]]]}, "b": 123}', '{"_id": {"a": [{"a": [1, 2]}]}, "b": 123}', @@ -112,7 +127,9 @@ class IdHoldingBsonWriterSpecification extends Specification { '{"_id": [1, 2], "b": 123}', '{"_id": [[1], [[2]]], "b": 123}', '{"_id": [{"a": 1}], "b": 123}', - '{"_id": [{"a": [{"b": 123}]}]}'] + '{"_id": [{"a": [{"b": 123}]}]}'], + [null, new BsonObjectId()] + ].combinations() } private static BsonDocument getEncodedDocument(BsonOutput buffer) { diff --git a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java index b8d94cfe067..5d3907bb210 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java @@ -19,21 +19,35 @@ import com.mongodb.MongoBulkWriteException; import com.mongodb.MongoWriteConcernException; import com.mongodb.MongoWriteException; +import com.mongodb.ServerAddress; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.Filters; import com.mongodb.client.model.ValidationOptions; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonInt32; import org.bson.BsonString; +import org.bson.BsonValue; import org.bson.Document; +import org.bson.codecs.pojo.PojoCodecProvider; import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; import static java.lang.String.format; import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -114,6 +128,54 @@ public void testWriteErrorDetailsIsPropagated() { } } + /** + * This test is not from the specification. + */ + @Test + @SuppressWarnings("try") + void insertMustGenerateIdAtMostOnce() throws ExecutionException, InterruptedException { + assumeTrue(isDiscoverableReplicaSet()); + ServerAddress primaryServerAddress = Fixture.getPrimary(); + CompletableFuture futureIdGeneratedByFirstInsertAttempt = new CompletableFuture<>(); + CompletableFuture futureIdGeneratedBySecondInsertAttempt = new CompletableFuture<>(); + CommandListener commandListener = new CommandListener() { + @Override + public void commandStarted(final CommandStartedEvent event) { + if (event.getCommandName().equals("insert")) { + BsonValue generatedId = event.getCommand().getArray("documents").get(0).asDocument().get("_id"); + if (!futureIdGeneratedByFirstInsertAttempt.isDone()) { + futureIdGeneratedByFirstInsertAttempt.complete(generatedId); + } else { + futureIdGeneratedBySecondInsertAttempt.complete(generatedId); + } + } + } + }; + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString("insert")))) + .append("errorLabels", new BsonArray(singletonList(new BsonString("RetryableWriteError")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(91)) + .append("errmsg", new BsonString("Replication is being shut down")))); + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .retryWrites(true) + .addCommandListener(commandListener) + .applyToServerSettings(builder -> builder.heartbeatFrequency(50, TimeUnit.MILLISECONDS)) + .build()); + FailPoint ignored = FailPoint.enable(failPointDocument, primaryServerAddress)) { + MongoCollection coll = client.getDatabase(database.getName()) + .getCollection(collection.getNamespace().getCollectionName(), MyDocument.class) + .withCodecRegistry(fromRegistries( + getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider.builder().automatic(true).build()))); + BsonValue insertedId = coll.insertOne(new MyDocument()).getInsertedId(); + BsonValue idGeneratedByFirstInsertAttempt = futureIdGeneratedByFirstInsertAttempt.get(); + assertEquals(idGeneratedByFirstInsertAttempt, insertedId); + assertEquals(idGeneratedByFirstInsertAttempt, futureIdGeneratedBySecondInsertAttempt.get()); + } + } + private void setFailPoint() { failPointDocument = new BsonDocument("configureFailPoint", new 
BsonString("failCommand")) .append("mode", new BsonDocument("times", new BsonInt32(1))) @@ -130,4 +192,15 @@ private void setFailPoint() { private void disableFailPoint() { getCollectionHelper().runAdminCommand(failPointDocument.append("mode", new BsonString("off"))); } + + public static final class MyDocument { + private int v; + + public MyDocument() { + } + + public int getV() { + return v; + } + } } From f2cfac7887c6b57abb7681edf65a02ba44a764cf Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Mon, 19 Aug 2024 09:28:24 -0700 Subject: [PATCH 67/90] Support Range Indexes as GA. (#1465) - Make trimFactor and sparsity optional. - Update specification tests. - Move duplicated code to EncryptionFixture. - Remove Beta annotation from Range algorithm methods. JAVA-5537 JAVA-5441 --------- Co-authored-by: Valentin Kovalenko --- build.gradle | 2 +- .../client/model/vault/EncryptOptions.java | 6 - .../client/model/vault/RangeOptions.java | 18 +- .../legacy/fle2v2-BypassQueryAnalysis.json | 1 - .../legacy/fle2v2-Compact.json | 6 +- .../fle2v2-CreateCollection-OldServer.json | 32 + .../legacy/fle2v2-CreateCollection.json | 19 - .../legacy/fle2v2-DecryptExistingData.json | 1 - .../legacy/fle2v2-Delete.json | 1 - ...EncryptedFields-vs-EncryptedFieldsMap.json | 1 - .../fle2v2-EncryptedFields-vs-jsonSchema.json | 1 - .../fle2v2-EncryptedFieldsMap-defaults.json | 1 - .../legacy/fle2v2-FindOneAndUpdate.json | 1 - .../legacy/fle2v2-InsertFind-Indexed.json | 1 - .../legacy/fle2v2-InsertFind-Unindexed.json | 1 - .../legacy/fle2v2-MissingKey.json | 5 +- .../legacy/fle2v2-NoEncryption.json | 1 - .../legacy/fle2v2-Rangev2-Compact.json | 289 +++ .../legacy/fle2v2-Rangev2-Date-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Date-Delete.json | 2 +- .../fle2v2-Rangev2-Date-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Date-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Date-Update.json | 2 +- .../fle2v2-Rangev2-Decimal-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Decimal-Delete.json | 2 +- ...e2v2-Rangev2-Decimal-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Decimal-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Decimal-Update.json | 2 +- ...v2-Rangev2-DecimalPrecision-Aggregate.json | 2 +- ...le2v2-Rangev2-DecimalPrecision-Delete.json | 2 +- ...ev2-DecimalPrecision-FindOneAndUpdate.json | 2 +- ...2-Rangev2-DecimalPrecision-InsertFind.json | 2 +- ...le2v2-Rangev2-DecimalPrecision-Update.json | 2 +- .../legacy/fle2v2-Rangev2-Defaults.json | 381 ++++ .../fle2v2-Rangev2-Double-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Double-Delete.json | 2 +- ...le2v2-Rangev2-Double-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Double-InsertFind.json | 187 +- .../legacy/fle2v2-Rangev2-Double-Update.json | 2 +- ...2v2-Rangev2-DoublePrecision-Aggregate.json | 1876 ++++------------- ...fle2v2-Rangev2-DoublePrecision-Delete.json | 2 +- ...gev2-DoublePrecision-FindOneAndUpdate.json | 2 +- ...v2-Rangev2-DoublePrecision-InsertFind.json | 2 +- ...fle2v2-Rangev2-DoublePrecision-Update.json | 2 +- .../legacy/fle2v2-Rangev2-Int-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Int-Delete.json | 2 +- .../fle2v2-Rangev2-Int-FindOneAndUpdate.json | 2 +- .../legacy/fle2v2-Rangev2-Int-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Int-Update.json | 2 +- .../legacy/fle2v2-Rangev2-Long-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Long-Delete.json | 2 +- .../fle2v2-Rangev2-Long-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Long-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Long-Update.json | 2 +- 
.../legacy/fle2v2-Update.json | 1 - ...v2-validatorAndPartialFieldExpression.json | 1 - .../client/vault/ClientEncryption.java | 4 - ...ionRangeDefaultExplicitEncryptionTest.java | 33 + .../mongodb/scala/model/vault/package.scala | 2 - .../scala/vault/ClientEncryption.scala | 4 +- .../com/mongodb/client/internal/Crypt.java | 3 - .../client/vault/ClientEncryption.java | 4 - ...tractClientSideEncryptionDeadlockTest.java | 12 +- ...entSideEncryptionDecryptionEventsTest.java | 12 +- ...tSideEncryptionExplicitEncryptionTest.java | 12 +- ...ionRangeDefaultExplicitEncryptionTest.java | 129 ++ ...EncryptionRangeExplicitEncryptionTest.java | 14 +- ...eEncryptionUniqueIndexKeyAltNamesTest.java | 12 +- ...ryptionDataKeyAndDoubleEncryptionTest.java | 28 +- .../ClientSideEncryptionCorpusTest.java | 31 +- ...ionRangeDefaultExplicitEncryptionTest.java | 31 + .../client/JsonPoweredCrudTestHelper.java | 9 +- .../mongodb/fixture/EncryptionFixture.java | 83 + 73 files changed, 1553 insertions(+), 1769 deletions(-) create mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Compact.json create mode 100644 driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Defaults.json create mode 100644 driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java create mode 100644 driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java create mode 100644 driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java create mode 100644 driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java diff --git a/build.gradle b/build.gradle index ccd32c0bc9e..86fe2ad12d4 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ ext { zstdVersion = '1.5.5-3' awsSdkV2Version = '2.18.9' awsSdkV1Version = '1.12.337' - mongoCryptVersion = '1.11.0-SNAPSHOT' + mongoCryptVersion = '1.11.0' projectReactorVersion = '2022.0.0' junitBomVersion = '5.10.2' logbackVersion = '1.3.14' diff --git a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java index 868470ee1fc..cfdf833e892 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java @@ -54,7 +54,6 @@ public EncryptOptions(final String algorithm) { *
    Unindexed
    *
    Range
    * - * Note: The Range algorithm is unstable. It is subject to breaking changes. * * @return the encryption algorithm */ @@ -118,7 +117,6 @@ public EncryptOptions keyAltName(final String keyAltName) { * The contention factor. * *

      It is an error to set contentionFactor when algorithm is not "Indexed" or "Range".
      - * Note: The Range algorithm is unstable. It is subject to breaking changes.
      * @param contentionFactor the contention factor, which must be {@code >= 0} or null. * @return this * @since 4.7 @@ -147,7 +145,6 @@ public Long getContentionFactor() { * *

      Currently, we support only "equality" or "range" queryType.
      *
      It is an error to set queryType when the algorithm is not "Indexed" or "Range".
      - * Note: The Range algorithm is unstable. It is subject to breaking changes.
      * @param queryType the query type * @return this * @since 4.7 @@ -162,7 +159,6 @@ public EncryptOptions queryType(@Nullable final String queryType) { * Gets the QueryType. * *

      Currently, we support only "equality" or "range" queryType.
      - * Note: The Range algorithm is unstable. It is subject to breaking changes. * @see #queryType(String) * @return the queryType or null * @since 4.7 @@ -177,14 +173,12 @@ public String getQueryType() { * The RangeOptions * *

      It is an error to set RangeOptions when the algorithm is not "Range". - *
      Note: The Range algorithm is unstable. It is subject to breaking changes. * @param rangeOptions the range options * @return this * @since 4.9 * @mongodb.server.release 8.0 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption */ - @Beta(Reason.SERVER) public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) { this.rangeOptions = rangeOptions; return this; diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java index fcdc70281bb..495f06a0650 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java @@ -16,8 +16,6 @@ package com.mongodb.client.model.vault; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.BsonValue; @@ -29,12 +27,10 @@ * *

      For {@code double} and {@code decimal128}, {@code min}/{@code max}/{@code precision} must all be set, or all be unset. * - *
      Note: The "Range" algorithm is unstable. It is subject to breaking changes. * @since 4.9 * @mongodb.server.release 6.2 * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption */ -@Beta(Reason.SERVER) public class RangeOptions { private BsonValue min; @@ -81,18 +77,21 @@ public RangeOptions max(@Nullable final BsonValue max) { * @return the trim factor value if set * @since 5.2 */ + @Nullable public Integer getTrimFactor() { return trimFactor; } /** - * Set the number of top-level edges stored per record by setting a trim factor, reducing write conflicts during simultaneous inserts - * and optimizing queries by excluding seldom-used high-level edges. + * Set the number of top-level edges stored per record. + *

      + * The trim factor may be used to tune performance. + * * @param trimFactor the trim factor * @return this * @since 5.2 */ - public RangeOptions setTrimFactor(final Integer trimFactor) { + public RangeOptions trimFactor(@Nullable final Integer trimFactor) { this.trimFactor = trimFactor; return this; } @@ -106,7 +105,10 @@ public BsonValue getMax() { } /** - * Set the Queryable Encryption range hypergraph sparsity factor + * Set the Queryable Encryption range hypergraph sparsity factor. + *

      + * Sparsity may be used to tune performance. + * * @param sparsity the sparsity * @return this */ diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-BypassQueryAnalysis.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-BypassQueryAnalysis.json index dcc3983ae0c..9b28df2f9a1 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-BypassQueryAnalysis.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-BypassQueryAnalysis.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Compact.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Compact.json index e47c689bf06..85fb8bf607a 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Compact.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Compact.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -131,6 +130,9 @@ "command": { "compactStructuredEncryptionData": "default" } + }, + "result": { + "ok": 1 } } ], @@ -228,4 +230,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection-OldServer.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection-OldServer.json index d5b04b3ea5f..c266aa6b835 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection-OldServer.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection-OldServer.json @@ -55,6 +55,38 @@ "result": { "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
} + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } } ] } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection.json index 819d2eec3c4..c324be8abc5 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-CreateCollection.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -158,9 +157,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -343,9 +339,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -851,9 +844,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1048,9 +1038,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1367,9 +1354,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1635,9 +1619,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-DecryptExistingData.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-DecryptExistingData.json index 905d3c9456f..1fb4c1d1bc7 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-DecryptExistingData.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-DecryptExistingData.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Delete.json index e4150eab8e6..ddfe57b00cb 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Delete.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json index b579979e945..bdc5c99bc28 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json index 0a84d736509..8e0c6dafa3c 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFieldsMap-defaults.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFieldsMap-defaults.json index 3e0905eadf3..1c0a057cad4 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFieldsMap-defaults.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-FindOneAndUpdate.json index 4606fbb930e..c5e689a3de4 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-FindOneAndUpdate.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Indexed.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Indexed.json index c7149d1f5c2..6e156ffc603 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Indexed.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Indexed.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Unindexed.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Unindexed.json index 008b0c959ff..48280f5bd4d 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Unindexed.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-InsertFind-Unindexed.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-MissingKey.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-MissingKey.json index 0b7e86bca3a..1e655f0a9c4 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-MissingKey.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-MissingKey.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -55,7 +54,7 @@ "key_vault_data": [], "tests": [ { - "description": "FLE2 encrypt fails with mising key", + "description": "FLE2 encrypt fails with missing key", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -86,7 +85,7 @@ ] }, { - "description": "FLE2 decrypt fails with mising key", + "description": "FLE2 decrypt fails with missing key", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-NoEncryption.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-NoEncryption.json index 185691d61c2..a6843c4737f 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-NoEncryption.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-NoEncryption.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Compact.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Compact.json new file mode 100644 index 00000000000..59241927ca1 --- /dev/null +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Compact.json @@ -0,0 +1,289 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "ok": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json index 63a2db3ef13..df2161cc364 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Aggregate.json @@ -328,7 +328,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json index 63a2b29fccc..b4f15d9b1fd 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Delete.json @@ -317,7 +317,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json index 049186c8695..97ab4aaeb91 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -330,7 +330,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json index d0751434b5f..a011c388e46 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-InsertFind.json @@ -322,7 +322,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json index 1e7750feebd..6bab6499f55 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Date-Update.json @@ -330,7 +330,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json index 5f573a933db..d1a82c21644 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Aggregate.json @@ -288,7 +288,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RS
byjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL
22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo
/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnj
IjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13K
ShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+r
C3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7J
bAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6w
kCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAA
zE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAA
ABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json index a94dd40feed..19cae3c64fa 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Delete.json @@ -279,7 +279,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pn
ekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y
0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1
Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM
4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i
1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHW
s/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2Btm
Q2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaD
ZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8
FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json index 5226facfb64..4ab3b63ea56 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -288,7 +288,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RS
byjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL
22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo
/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnj
IjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13K
ShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+r
C3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7J
bAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6w
kCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAA
zE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAA
ABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json index b6615454bd6..5a2adf69070 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-InsertFind.json @@ -282,7 +282,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5L
BcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI
9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzX
lFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKl
ObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSn
DgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAA
AFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4
+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVk
ACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeG
FtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json index ceef8ca9ba2..b840d38347a 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Decimal-Update.json @@ -290,7 +290,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RS
byjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL
22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo
/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnj
IjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13K
ShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+r
C3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7J
bAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6w
kCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAA
zE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAA
ABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json index 35cc4aba874..271f57b125d 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -317,7 +317,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsA
CAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2
m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json index e000c405897..7b3d5d8225a 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -308,7 +308,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8
k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2
AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json index 27f10a30a79..af371f7b3fe 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -317,7 +317,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlh
HgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0A
AAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json index 5fb96730d6c..bbe81f87adc 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -311,7 +311,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdth
Rhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnK
WWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json index f67ae3ca237..987bdf1aa66 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -319,7 +319,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Defaults.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Defaults.json new file mode 100644 index 00000000000..c2a119cb7f6 --- /dev/null +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,381 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + 
"contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg
9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxg
eclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + 
"queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json index e14ca8ff0ca..daa7f4e9736 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Aggregate.json @@ -290,7 +290,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzE
yAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4w
VsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDK
oKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json index 6821c97939b..4a9c1f27b5d 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Delete.json @@ -281,7 +281,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json index 298a4506ccf..d7860de83ec 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -290,7 +290,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json index dabe8a0930d..934af381f1e 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-InsertFind.json @@ -72,7 +72,7 @@ ], "tests": [ { - "description": "FLE2 Range Double. Update.", + "description": "FLE2 Range Double. 
Insert and Find.", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -111,7 +111,7 @@ } }, { - "name": "updateOne", + "name": "find", "arguments": { "filter": { "encryptedDoubleNoPrecision": { @@ -119,20 +119,16 @@ "$numberDouble": "0" } } - }, - "update": { - "$set": { - "encryptedDoubleNoPrecision": { - "$numberDouble": "2" - } - } } }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] } ], "expectations": [ @@ -282,31 +278,18 @@ }, { "command_started_event": { - "command_name": "update", "command": { - "update": "default", - "ordered": true, - "updates": [ - { - "q": { - "encryptedDoubleNoPrecision": { - "$gt": { - "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN
5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilr
WfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJ
CB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", - "subType": "06" - } - } - } - }, - "u": { - "$set": { - "encryptedDoubleNoPrecision": { - "$$type": "binData" - } + "find": "default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" } } } - ], + }, "encryptionInformation": { "type": 1, "schema": { @@ -339,9 +322,9 @@ ] } } - }, - "$db": "default" - } + } + }, + "command_name": "find" } } ], @@ -748,385 +731,385 @@ "__safeContent__": [ { "$binary": { - "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", "subType": "00" } }, { "$binary": { - "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", "subType": "00" } }, { "$binary": { - "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", "subType": "00" } }, { "$binary": { - "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", "subType": "00" } }, { "$binary": { - "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", "subType": "00" } }, { "$binary": { - "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", "subType": "00" } }, { "$binary": { - "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", "subType": "00" } }, { "$binary": { - "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", "subType": "00" } }, { "$binary": { - "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", "subType": "00" } }, { "$binary": { - "base64": 
"hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", "subType": "00" } }, { "$binary": { - "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", "subType": "00" } }, { "$binary": { - "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", "subType": "00" } }, { "$binary": { - "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", "subType": "00" } }, { "$binary": { - "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", "subType": "00" } }, { "$binary": { - "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", "subType": "00" } }, { "$binary": { - "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", "subType": "00" } }, { "$binary": { - "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", "subType": "00" } }, { "$binary": { - "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", "subType": "00" } }, { "$binary": { - "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", "subType": "00" } }, { "$binary": { - "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", "subType": "00" } }, { "$binary": { - "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", "subType": "00" } }, { "$binary": { - "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", "subType": "00" } }, { "$binary": { - "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", "subType": "00" } }, { "$binary": { - "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", "subType": "00" } }, { "$binary": { - "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", "subType": "00" } }, { "$binary": { - "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", "subType": "00" } }, { "$binary": { - "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", "subType": "00" } }, { "$binary": { - "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", "subType": "00" } }, { "$binary": { - "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", "subType": "00" } }, { "$binary": { - "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", "subType": "00" } }, { "$binary": { - "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", "subType": "00" } }, { "$binary": { - "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", "subType": 
"00" } }, { "$binary": { - "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", "subType": "00" } }, { "$binary": { - "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", "subType": "00" } }, { "$binary": { - "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", "subType": "00" } }, { "$binary": { - "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", "subType": "00" } }, { "$binary": { - "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", "subType": "00" } }, { "$binary": { - "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", "subType": "00" } }, { "$binary": { - "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", "subType": "00" } }, { "$binary": { - "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", "subType": "00" } }, { "$binary": { - "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", "subType": "00" } }, { "$binary": { - "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", "subType": "00" } }, { "$binary": { - "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", "subType": "00" } }, { "$binary": { - "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", "subType": "00" } }, { "$binary": { - "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", "subType": "00" } }, { "$binary": { - "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", "subType": "00" } }, { "$binary": { - "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", "subType": "00" } }, { "$binary": { - "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", "subType": "00" } }, { "$binary": { - "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", "subType": "00" } }, { "$binary": { - "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", "subType": "00" } }, { "$binary": { - "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", "subType": "00" } }, { "$binary": { - "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", "subType": "00" } }, { "$binary": { - "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", "subType": "00" } }, { "$binary": { - "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", "subType": "00" } }, { "$binary": { - "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "base64": 
"jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", "subType": "00" } }, { "$binary": { - "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", "subType": "00" } }, { "$binary": { - "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", "subType": "00" } }, { "$binary": { - "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", "subType": "00" } }, { "$binary": { - "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", "subType": "00" } }, { "$binary": { - "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", "subType": "00" } }, { "$binary": { - "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", "subType": "00" } }, { "$binary": { - "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", "subType": "00" } }, { "$binary": { - "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", "subType": "00" } }, { "$binary": { - "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", "subType": "00" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json index dabe8a0930d..ec95e0334a1 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Double-Update.json @@ -292,7 +292,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json index 87d0e3dd8c1..e8a50ebeca9 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -81,7 +81,7 @@ ], "tests": [ { - "description": "Find with $gt", + "description": "FLE2 Range DoublePrecision. 
Aggregate.", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -103,7 +103,7 @@ "document": { "_id": 0, "encryptedDoublePrecision": { - "$numberDouble": "0.0" + "$numberDouble": "0" } } } @@ -114,1298 +114,11 @@ "document": { "_id": 1, "encryptedDoublePrecision": { - "$numberDouble": "1.0" + "$numberDouble": "1" } } } }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "0.0" - } - } - } - }, - "result": [ - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Find with $gte", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$gte": { - "$numberDouble": "0.0" - } - } - }, - "sort": { - "_id": 1 - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Find with $gt with no results", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "1.0" - } - } - } - }, - "result": [] - } - ] - }, - { - "description": "Find with $lt", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$lt": { - "$numberDouble": "1.0" - } - } - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - ] - } - ] - }, - { - "description": "Find with $lte", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$lte": { - "$numberDouble": "1.0" - } - } - }, - "sort": { - "_id": 1 - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Find with $lt below min", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$lt": { - "$numberDouble": "0.0" - } - } - } - }, - "result": { - "errorContains": "must be greater than the range minimum" - } - } - ] - }, - { - "description": "Find with $gt above max", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "200.0" - } - } - } - }, - "result": { - "errorContains": "must be less than the range max" - } - } - ] - }, - { - "description": "Find with $gt and $lt", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "0.0" - }, - "$lt": { - "$numberDouble": "2.0" - } - } - } - }, - "result": [ - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": 
"Find with equality", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - ] - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - }, - "result": [ - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Find with full range", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$gte": { - "$numberDouble": "0.0" - }, - "$lte": { - "$numberDouble": "200.0" - } - } - }, - "sort": { - "_id": 1 - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Find with $in", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedDoublePrecision": { - "$in": [ - { - "$numberDouble": "0.0" - } - ] - } - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - ] - } - ] - }, - { - "description": "Insert out of range", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "-1" - } - } - }, - "result": { - 
"errorContains": "value must be greater than or equal to the minimum value" - } - } - ] - }, - { - "description": "Insert min and max", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 200, - "encryptedDoublePrecision": { - "$numberDouble": "200.0" - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - } - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - }, - { - "_id": 200, - "encryptedDoublePrecision": { - "$numberDouble": "200.0" - } - } - ] - } - ] - }, - { - "description": "Aggregate with $gte", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$gte": { - "$numberDouble": "0.0" - } - } - } - }, - { - "$sort": { - "_id": 1 - } - } - ] - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Aggregate with $gt with no results", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "1.0" - } - } - } - } - ] - }, - "result": [] - } - ] - }, - { - "description": "Aggregate with $lt", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": 
"1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$lt": { - "$numberDouble": "1.0" - } - } - } - } - ] - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - ] - } - ] - }, - { - "description": "Aggregate with $lte", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$lte": { - "$numberDouble": "1.0" - } - } - } - }, - { - "$sort": { - "_id": 1 - } - } - ] - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Aggregate with $lt below min", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$lt": { - "$numberDouble": "0.0" - } - } - } - } - ] - }, - "result": { - "errorContains": "must be greater than the range minimum" - } - } - ] - }, - { - "description": "Aggregate with $gt above max", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "200.0" - } - } - } - } - ] - }, - "result": { - "errorContains": "must be less than the range max" - } - } - ] - }, - { - "description": "Aggregate with $gt and $lt", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": 
"insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$gt": { - "$numberDouble": "0.0" - }, - "$lt": { - "$numberDouble": "2.0" - } - } - } - } - ] - }, - "result": [ - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Aggregate with equality", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - } - }, - { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - ] - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - ] - }, { "name": "aggregate", "arguments": { @@ -1413,7 +126,9 @@ { "$match": { "encryptedDoublePrecision": { - "$numberDouble": "1.0" + "$gt": { + "$numberDouble": "0" + } } } } @@ -1423,228 +138,443 @@ { "_id": 1, "encryptedDoublePrecision": { - "$numberDouble": "1.0" + "$numberDouble": "1" } } ] } - ] - }, - { - "description": "Aggregate with full range", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ + ], + "expectations": [ { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" } - } + }, + "command_name": "listCollections" } }, { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" } - } + }, + "command_name": "find" } }, { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, "encryptedDoublePrecision": { - "$gte": { - "$numberDouble": "0.0" - }, - "$lte": { - "$numberDouble": "200.0" - } + "$$type": "binData" } } - }, - { - "$sort": { - "_id": 1 + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } } } - ] - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } }, - { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } - ] - } - ] - }, - { - "description": "Aggregate with $in", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedDoublePrecision": { - "$numberDouble": "1.0" - } - } + "command_name": "insert" } }, { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, "encryptedDoublePrecision": { - "$in": [ + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ { - "$numberDouble": "0.0" + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } } ] } } } - ] - }, - "result": [ - { - "_id": 0, - "encryptedDoublePrecision": { - "$numberDouble": "0.0" - } - } - ] - } - ] - }, - { - "description": "Wrong type: Insert Int", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } } } - } + }, + "command_name": "aggregate" } } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { + ], + "outcome": { + "collection": { + "data": [ + { "_id": 0, "encryptedDoublePrecision": { - "$numberInt": "0" - } - } - }, - "result": { - "errorContains": "cannot encrypt element" - } - } - ] - }, - { - "description": "Wrong type: Find Int", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } } - } - } - } - } - }, - "operations": [ - { - "name": "find", - "arguments": { - "filter": { + ] + }, + { + "_id": 1, "encryptedDoublePrecision": { - "$gte": { - "$numberInt": "0" + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } } - } - }, - "sort": { - "_id": 1 + ] } - }, - "result": { - "errorContains": "field type is not supported" - } + ] } - ] + } } ] } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json index a9315dec960..8a0fecf786c 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -308,7 +308,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git 
a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json index 28bebe0dbb0..ac77931d610 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -317,7 +317,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAg
AAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3Wpe
MVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json index 3b3176be6f7..5dcc09dca91 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -311,7 +311,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAI
AAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3K
d78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json index be2d0e9f4af..483e3d52e60 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-DoublePrecision-Update.json @@ -319,7 +319,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8
fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLT
nwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json index c689dede185..6cd837c7890 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Aggregate.json @@ -308,7 +308,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json index 4a6b34a1dc9..b251db91575 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Delete.json @@ -299,7 +299,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json index 2bf905fa652..6e09b5ea2c7 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -308,7 +308,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json index a5eb4d60ec6..cbab7e76996 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-InsertFind.json @@ -302,7 +302,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json index e826ea2acf0..cb6b223943a 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Int-Update.json @@ -310,7 +310,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json index d5020f5927f..5c4bf101012 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Aggregate.json @@ -308,7 +308,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json index 3720d00341f..faf0c401b71 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Delete.json @@ -299,7 +299,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json index 5e4b5ae0dea..b233b40b54a 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -308,7 +308,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json index 0d485806267..1b787d4cb6b 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-InsertFind.json @@ -302,7 +302,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json index 2d3321fd80b..07182bb5e22 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Rangev2-Long-Update.json @@ -310,7 +310,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Update.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Update.json index 14104e2cd8a..cb260edc0d6 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Update.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-Update.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-validatorAndPartialFieldExpression.json b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-validatorAndPartialFieldExpression.json index 4adf6fc07d2..901c4dd841c 100644 --- a/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-validatorAndPartialFieldExpression.json +++ b/driver-core/src/test/resources/client-side-encryption/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java index 02110096d08..38ad1e618e6 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java @@ -18,8 +18,6 @@ import com.mongodb.AutoEncryptionSettings; import 
com.mongodb.MongoUpdatedEncryptedFieldsException; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateEncryptedCollectionParams; import com.mongodb.client.model.vault.DataKeyOptions; @@ -99,7 +97,6 @@ public interface ClientEncryption extends Closeable { * {@code $gt} may also be {@code $gte}. {@code $lt} may also be {@code $lte}. * *

      Only supported when queryType is "range" and algorithm is "Range". - * <p>
      Note: The Range algorithm is unstable. It is subject to breaking changes. * * @param expression the Match Expression or Aggregate Expression * @param options the options @@ -109,7 +106,6 @@ public interface ClientEncryption extends Closeable { * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption * @mongodb.driver.manual reference/operator/aggregation/match/ $match */ - @Beta(Reason.SERVER) Publisher encryptExpression(Bson expression, EncryptOptions options); /** diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java new file mode 100644 index 00000000000..ec7aa9e8c20 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.client.AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideEncryptionRangeDefaultExplicitEncryptionTest extends AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest { + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala index 0eda4b99de2..faf193ff000 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala @@ -57,10 +57,8 @@ package object vault { /** * Range options specifies index options for a Queryable Encryption field supporting "range" queries. - *

      Note: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. * @since 4.9 */ - @Beta(Array(Reason.SERVER)) type RangeOptions = JRangeOptions object RangeOptions { diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala index 226b271ff96..4b6d9486d32 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala @@ -81,8 +81,6 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos * * Only supported when queryType is "range" and algorithm is "Range". * - * '''Note:''' The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. - * * [[https://www.mongodb.com/docs/manual/core/queryable-encryption/ queryable encryption]] * * @note Requires MongoDB 8.0 or greater @@ -91,7 +89,7 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos * @return a Publisher containing the queryable encrypted range expression * @since 4.9 */ - @Beta(Array(Reason.SERVER)) def encryptExpression( + def encryptExpression( expression: Document, options: EncryptOptions ): SingleObservable[Document] = diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java index 53a65ceaa02..990f196f62c 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java @@ -19,8 +19,6 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoException; import com.mongodb.MongoInternalException; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.MongoClient; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.model.vault.EncryptOptions; @@ -212,7 +210,6 @@ BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options * @param options the options * @return the encrypted expression */ - @Beta(Reason.SERVER) BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout timeoutOperation) { notNull("expression", expression); notNull("options", options); diff --git a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java index 582cf94e044..8b883273ca3 100644 --- a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java +++ b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java @@ -18,8 +18,6 @@ import com.mongodb.AutoEncryptionSettings; import com.mongodb.MongoUpdatedEncryptedFieldsException; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.FindIterable; import com.mongodb.client.MongoDatabase; import com.mongodb.client.model.CreateCollectionOptions; @@ -99,7 +97,6 @@ public interface ClientEncryption extends Closeable { * {@code $gt} may also be {@code $gte}. {@code $lt} may also be {@code $lte}. * *

      Only supported when queryType is "range" and algorithm is "Range". - * <p>
      Note: The Range algorithm is unstable. It is subject to breaking changes. * * @param expression the Match Expression or Aggregate Expression * @param options the options @@ -109,7 +106,6 @@ public interface ClientEncryption extends Closeable { * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption * @mongodb.driver.manual reference/operator/aggregation/match/ $match */ - @Beta(Reason.SERVER) BsonDocument encryptExpression(Bson expression, EncryptOptions options); /** diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java index 2ac985f21a6..2a83b328298 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java @@ -47,8 +47,6 @@ import java.io.File; import java.io.IOException; import java.net.URISyntaxException; -import java.util.Base64; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -59,6 +57,8 @@ import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.Fixture.getMongoClient; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.KmsProviderType.LOCAL; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; @@ -93,13 +93,7 @@ public void setUp() throws IOException, URISyntaxException { .validationOptions(new ValidationOptions() .validator(new BsonDocument("$jsonSchema", bsonDocumentFromPath("external-schema.json"))))); - kmsProviders = new HashMap<>(); - Map localProviderMap = new HashMap<>(); - localProviderMap.put("key", - Base64.getDecoder().decode( - "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" - + "GJkTXVyZG9uSjFk")); - kmsProviders.put("local", localProviderMap); + kmsProviders = getKmsProviders(LOCAL); ClientEncryption clientEncryption = ClientEncryptions.create( ClientEncryptionSettings.builder() .keyVaultMongoClientSettings(getKeyVaultClientSettings(new TestCommandListener())) diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java index 81f6ecca257..2271f14ae86 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java @@ -28,6 +28,7 @@ import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.vault.ClientEncryption; import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.fixture.EncryptionFixture; import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -39,8 +40,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Base64; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -52,6 +51,7 @@ import static com.mongodb.client.Fixture.getDefaultDatabase; import static 
com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -82,13 +82,7 @@ public void setUp() { getDefaultDatabase().getCollection("decryption_events").drop(); getDefaultDatabase().createCollection("decryption_events"); - Map> kmsProviders = new HashMap<>(); - Map localProviderMap = new HashMap<>(); - localProviderMap.put("key", - Base64.getDecoder().decode( - "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" - + "GJkTXVyZG9uSjFk")); - kmsProviders.put("local", localProviderMap); + Map> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java index be07fd11321..068a9079dad 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java @@ -27,6 +27,7 @@ import com.mongodb.client.model.DropCollectionOptions; import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.fixture.EncryptionFixture; import org.bson.BsonBinary; import org.bson.BsonDocument; import org.bson.BsonInt32; @@ -38,8 +39,6 @@ import java.io.File; import java.util.ArrayList; -import java.util.Base64; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -51,6 +50,7 @@ import static com.mongodb.client.Fixture.getMongoClient; import static com.mongodb.client.Fixture.getMongoClientSettings; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -94,13 +94,7 @@ public void setUp() { dataKeysCollection.insertOne(key1Document); key1Id = key1Document.getBinary("_id"); - Map> kmsProviders = new HashMap<>(); - Map localProviderMap = new HashMap<>(); - localProviderMap.put("key", - Base64.getDecoder().decode( - "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" - + "GJkTXVyZG9uSjFk")); - kmsProviders.put("local", localProviderMap); + Map> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() .keyVaultMongoClientSettings(getMongoClientSettings()) diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java new file mode 100644 index 00000000000..1a7cc7a00cb --- /dev/null +++ 
b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RangeOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.fixture.EncryptionFixture.KmsProviderType; +import org.bson.BsonBinary; +import org.bson.BsonInt32; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import static com.mongodb.ClusterFixture.isServerlessTest; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * + */ + +public abstract class AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest { + private static final BsonInt32 VALUE_TO_ENCRYPT = new BsonInt32(123); + private ClientEncryption clientEncryption; + private BsonBinary keyId; + private BsonBinary payloadDefaults; + + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + @BeforeEach + public void setUp() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isStandalone()); + assumeFalse(isServerlessTest()); + + MongoNamespace dataKeysNamespace = new MongoNamespace("keyvault.datakeys"); + clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettings()) + .keyVaultNamespace(dataKeysNamespace.getFullName()) + .kmsProviders(getKmsProviders(KmsProviderType.LOCAL)) + .build()); + keyId = clientEncryption.createDataKey("local"); + payloadDefaults = clientEncryption.encrypt(VALUE_TO_ENCRYPT, + getEncryptionOptions() + ); + } + + private EncryptOptions getEncryptionOptions() { + return new EncryptOptions("Range") + .keyId(keyId) + .contentionFactor(0L) + .rangeOptions(new RangeOptions() + .min(new BsonInt32(0)) + .max(new BsonInt32(1000)) + ); + } + + @AfterEach + @SuppressWarnings("try") + public void cleanUp() { + try (ClientEncryption ignored = clientEncryption) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + /** + * Validates that the omission of options trimFactor and sparsity leads to libmongocrypt-provided defaults being used instead. 
+ */ + @Test + @DisplayName("Case 1: Uses libmongocrypt defaults") + void shouldUseDefaultsWhenNotSpecified() { + BsonBinary encryptedValue = clientEncryption.encrypt(VALUE_TO_ENCRYPT, + new EncryptOptions("Range") + .keyId(keyId) + .contentionFactor(0L) + .rangeOptions(new RangeOptions() + .min(new BsonInt32(0)) + .max(new BsonInt32(1000)) + .sparsity(2L) + .trimFactor(6) + ) + ); + + assertEquals(payloadDefaults.getData().length, encryptedValue.getData().length); + } + + @Test + @DisplayName("Case 2: Accepts `trimFactor` 0") + void shouldAcceptTrimFactor() { + BsonBinary encryptedValue = clientEncryption.encrypt(VALUE_TO_ENCRYPT, + new EncryptOptions("Range") + .keyId(keyId) + .contentionFactor(0L) + .rangeOptions(new RangeOptions() + .min(new BsonInt32(0)) + .max(new BsonInt32(1000)) + .trimFactor(0) + ) + ); + + assertTrue(payloadDefaults.getData().length < encryptedValue.getData().length); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java index 061b31482ef..be667c9b64c 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java @@ -32,6 +32,7 @@ import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RangeOptions; import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.fixture.EncryptionFixture; import com.mongodb.test.AfterBeforeParameterResolver; import org.bson.BsonArray; import org.bson.BsonBinary; @@ -52,8 +53,6 @@ import java.io.File; import java.util.ArrayList; -import java.util.Base64; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -65,6 +64,7 @@ import static com.mongodb.client.Fixture.getMongoClient; import static com.mongodb.client.Fixture.getMongoClientSettings; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -114,13 +114,7 @@ public void setUp(final Type type) { dataKeysCollection.drop(); dataKeysCollection.insertOne(key1Document); - Map> kmsProviders = new HashMap<>(); - Map localProviderMap = new HashMap<>(); - localProviderMap.put("key", - Base64.getDecoder().decode( - "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" - + "GJkTXVyZG9uSjFk")); - kmsProviders.put("local", localProviderMap); + Map> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() .keyVaultMongoClientSettings(getMongoClientSettings()) @@ -318,7 +312,7 @@ public String toString() { RangeOptions getRangeOptions() { RangeOptions rangeOptions = new RangeOptions() - .setTrimFactor(1) + .trimFactor(1) .sparsity(1L); switch (this) { case DECIMAL_NO_PRECISION: diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java index 6b492823baa..5b84ded8b35 100644 --- 
a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java @@ -25,6 +25,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.fixture.EncryptionFixture; import org.bson.BsonBinary; import org.bson.BsonDocument; import org.bson.BsonString; @@ -32,8 +33,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Base64; -import java.util.HashMap; import java.util.Map; import static com.mongodb.ClusterFixture.isServerlessTest; @@ -41,6 +40,7 @@ import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.Fixture.getMongoClientSettings; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -83,13 +83,7 @@ public void setUp() { + "}") ); - Map> kmsProviders = new HashMap<>(); - Map localProviderMap = new HashMap<>(); - localProviderMap.put("key", - Base64.getDecoder().decode( - "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" - + "GJkTXVyZG9uSjFk")); - kmsProviders.put("local", localProviderMap); + Map> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() .keyVaultMongoClientSettings(getMongoClientSettings()) diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java index e4d81a9b0d8..576f585fd45 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java @@ -23,6 +23,7 @@ import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.vault.ClientEncryption; import com.mongodb.client.vault.ClientEncryptions; +import com.mongodb.fixture.EncryptionFixture.KmsProviderType; import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -39,11 +40,11 @@ import java.util.HashMap; import java.util.Map; -import static com.mongodb.ClusterFixture.getEnv; import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; @@ -80,25 +81,12 @@ public void setUp() { // Step 2: Create encrypted client and client encryption - Map> kmsProviders = new HashMap>() {{ - put("aws", new HashMap() {{ - put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); - put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); - }}); - put("azure", new HashMap() {{ - put("tenantId", 
getEnv("AZURE_TENANT_ID")); - put("clientId", getEnv("AZURE_CLIENT_ID")); - put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); - }}); - put("gcp", new HashMap() {{ - put("email", getEnv("GCP_EMAIL")); - put("privateKey", getEnv("GCP_PRIVATE_KEY")); - }}); - put("local", new HashMap() {{ - put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" - + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); - }}); - }}; + Map> kmsProviders = getKmsProviders( + KmsProviderType.AWS, + KmsProviderType.AZURE, + KmsProviderType.GCP, + KmsProviderType.LOCAL + ); HashMap schemaMap = new HashMap() {{ put("db.coll", BsonDocument.parse("{" diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java index 4570540c7e1..a812e174047 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java @@ -24,6 +24,7 @@ import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.vault.ClientEncryption; import com.mongodb.client.vault.ClientEncryptions; +import com.mongodb.fixture.EncryptionFixture.KmsProviderType; import org.bson.BsonBinary; import org.bson.BsonBinarySubType; import org.bson.BsonDocument; @@ -45,11 +46,11 @@ import java.util.HashMap; import java.util.Map; -import static com.mongodb.ClusterFixture.getEnv; import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.client.Fixture.getMongoClientSettings; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; import static java.util.Arrays.asList; import static org.bson.codecs.configuration.CodecRegistries.fromCodecs; import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; @@ -102,28 +103,12 @@ public void setUp() throws IOException, URISyntaxException { dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-local.json")); // Step 4: Configure our objects - Map> kmsProviders = new HashMap>() {{ - put("aws", new HashMap() {{ - put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); - put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); - }}); - put("azure", new HashMap() {{ - put("tenantId", getEnv("AZURE_TENANT_ID")); - put("clientId", getEnv("AZURE_CLIENT_ID")); - put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); - }}); - put("gcp", new HashMap() {{ - put("email", getEnv("GCP_EMAIL")); - put("privateKey", getEnv("GCP_PRIVATE_KEY")); - }}); - put("kmip", new HashMap() {{ - put("endpoint", getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698")); - }}); - put("local", new HashMap() {{ - put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" - + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); - }}); - }}; + Map> kmsProviders = getKmsProviders( + KmsProviderType.AWS, + KmsProviderType.AZURE, + KmsProviderType.GCP, + KmsProviderType.KMIP, + KmsProviderType.LOCAL); HashMap schemaMap = new HashMap<>(); schemaMap.put("db.coll", schemaDocument); diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java new file mode 100644 
index 00000000000..1e3a12c19b9 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; + +public class ClientSideEncryptionRangeDefaultExplicitEncryptionTest extends AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest { + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return ClientEncryptions.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java b/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java index e49fcd66725..f2859247a70 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java +++ b/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java @@ -305,11 +305,10 @@ BsonDocument getRunCommandResult(final BsonDocument collectionOptions, final Bso response = database.runCommand(clientSession, command, readPreference, BsonDocument.class); } } - response.remove("ok"); - response.remove("operationTime"); - response.remove("opTime"); - response.remove("electionId"); - response.remove("$clusterTime"); + if (response.containsKey("ok")) { + // The server response to the command may contain a double value for the "ok" field, but the expected result is an integer. + response.put("ok", new BsonInt32((int) response.get("ok").asDouble().getValue())); + } return toResult(response); } diff --git a/driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java b/driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java new file mode 100644 index 00000000000..f6edb9a14ed --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.fixture; + +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.ClusterFixture.getEnv; + +/** + * Helper class for the CSFLE/QE tests. 
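+ * <p>
+ * Illustrative usage only (not part of this patch; the cloud-provider entries assume the corresponding
+ * credentials are available through {@code getEnv}):
+ * <pre>{@code
+ *  // local-only, as used by most of the tests touched in this patch
+ *  Map<String, Map<String, Object>> kmsProviders = getKmsProviders(KmsProviderType.LOCAL);
+ *  // several providers at once, as in the corpus and double-encryption tests
+ *  Map<String, Map<String, Object>> allProviders =
+ *          getKmsProviders(KmsProviderType.AWS, KmsProviderType.AZURE, KmsProviderType.GCP, KmsProviderType.KMIP, KmsProviderType.LOCAL);
+ * }</pre>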
+ */ +public final class EncryptionFixture { + + private EncryptionFixture() { + //NOP + } + + public static Map> getKmsProviders(final KmsProviderType... kmsProviderTypes) { + return new HashMap>() {{ + for (KmsProviderType kmsProviderType : kmsProviderTypes) { + switch (kmsProviderType) { + case LOCAL: + put("local", new HashMap() {{ + put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" + + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); + }}); + break; + case GCP: + put("gcp", new HashMap() {{ + put("email", getEnv("GCP_EMAIL")); + put("privateKey", getEnv("GCP_PRIVATE_KEY")); + }}); + break; + case AWS: + put("aws", new HashMap() {{ + put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); + put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); + }}); + break; + case AZURE: + put("azure", new HashMap() {{ + put("tenantId", getEnv("AZURE_TENANT_ID")); + put("clientId", getEnv("AZURE_CLIENT_ID")); + put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); + }}); + break; + case KMIP: + put("kmip", new HashMap() {{ + put("endpoint", getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698")); + }}); + break; + default: + throw new IllegalArgumentException("Unsupported KMS provider type: " + kmsProviderType); + } + } + }}; + } + + public enum KmsProviderType { + LOCAL, + AWS, + AZURE, + GCP, + KMIP + } +} From 505915869563affeb43a124900b21d06e7f166cf Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Mon, 19 Aug 2024 11:10:49 -0700 Subject: [PATCH 68/90] Fix exception propagation in Async API methods (#1479) - Resolve an issue where exceptions thrown during thenRun, thenSupply, and related operations in the asynchronous API were not properly propagated to the completion callback. This issue was addressed by replacing `unsafeFinish` with `finish`, ensuring that exceptions are caught and correctly passed to the completion callback when executed on different threads. - Update existing Async API tests to ensure they simulate separate async thread execution. - Modify the async callback to catch and handle exceptions locally. Exceptions are now directly processed and passed as an error argument to the callback function, avoiding propagation to the parent callback. - Move `callback.onResult` outside the catch block to ensure it's not invoked twice when an exception occurs. 
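An illustrative sketch of the pattern this addresses (the chaining API is the driver-internal
AsyncRunnable/AsyncSupplier API changed below; `startAsyncWork` is a hypothetical step that completes
its callback on another thread). An exception thrown by a later step now reaches the terminal
callback instead of escaping on the worker thread:

    beginAsync().thenRun(c -> {
        startAsyncWork(c);                        // hypothetical async step; completes 'c' on another thread
    }).thenSupply(c -> {
        throw new IllegalStateException("fail");  // with this change, routed to the finish callback
    }).finish((value, error) -> {
        // error is the IllegalStateException; value is null
    });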
JAVA-5562 --- .../mongodb/internal/async/AsyncFunction.java | 26 ++++ .../mongodb/internal/async/AsyncRunnable.java | 8 +- .../mongodb/internal/async/AsyncSupplier.java | 24 ++-- .../connection/InternalStreamConnection.java | 6 +- .../InternalStreamConnectionInitializer.java | 9 +- ...t.java => AsyncFunctionsAbstractTest.java} | 24 +--- ...tract.java => AsyncFunctionsTestBase.java} | 91 ++++++++++---- .../async/SameThreadAsyncFunctionsTest.java | 94 ++++++++++++++ .../SeparateThreadAsyncFunctionsTest.java | 118 ++++++++++++++++++ 9 files changed, 343 insertions(+), 57 deletions(-) rename driver-core/src/test/unit/com/mongodb/internal/async/{AsyncFunctionsTest.java => AsyncFunctionsAbstractTest.java} (97%) rename driver-core/src/test/unit/com/mongodb/internal/async/{AsyncFunctionsTestAbstract.java => AsyncFunctionsTestBase.java} (80%) create mode 100644 driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java create mode 100644 driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java b/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java index 5be92558ee0..7203d3a4945 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java @@ -18,6 +18,8 @@ import com.mongodb.lang.Nullable; +import java.util.concurrent.atomic.AtomicBoolean; + /** * See {@link AsyncRunnable} *

      @@ -33,4 +35,28 @@ public interface AsyncFunction { * @param callback the callback */ void unsafeFinish(T value, SingleResultCallback callback); + + /** + * Must be invoked at end of async chain or when executing a callback handler supplied by the caller. + * + * @param callback the callback provided by the method the chain is used in. + */ + default void finish(final T value, final SingleResultCallback callback) { + final AtomicBoolean callbackInvoked = new AtomicBoolean(false); + try { + this.unsafeFinish(value, (v, e) -> { + if (!callbackInvoked.compareAndSet(false, true)) { + throw new AssertionError(String.format("Callback has been already completed. It could happen " + + "if code throws an exception after invoking an async method. Value: %s", v), e); + } + callback.onResult(v, e); + }); + } catch (Throwable t) { + if (!callbackInvoked.compareAndSet(false, true)) { + throw t; + } else { + callback.completeExceptionally(t); + } + } + } } diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java index a81b2fdd12c..d4ead3c5b96 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java @@ -171,7 +171,9 @@ default AsyncRunnable thenRun(final AsyncRunnable runnable) { return (c) -> { this.unsafeFinish((r, e) -> { if (e == null) { - runnable.unsafeFinish(c); + /* If 'runnable' is executed on a different thread from the one that executed the initial 'finish()', + then invoking 'finish()' within 'runnable' will catch and propagate any exceptions to 'c' (the callback). */ + runnable.finish(c); } else { c.completeExceptionally(e); } @@ -236,7 +238,7 @@ default AsyncRunnable thenRunIf(final Supplier condition, final AsyncRu return; } if (matched) { - runnable.unsafeFinish(callback); + runnable.finish(callback); } else { callback.complete(callback); } @@ -253,7 +255,7 @@ default AsyncSupplier thenSupply(final AsyncSupplier supplier) { return (c) -> { this.unsafeFinish((r, e) -> { if (e == null) { - supplier.unsafeFinish(c); + supplier.finish(c); } else { c.completeExceptionally(e); } diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java b/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java index b7d24dd3df5..77c289c8723 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java @@ -18,6 +18,7 @@ import com.mongodb.lang.Nullable; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; @@ -54,18 +55,25 @@ default void unsafeFinish(@Nullable final Void value, final SingleResultCallback } /** - * Must be invoked at end of async chain. + * Must be invoked at end of async chain or when executing a callback handler supplied by the caller. + * + * @see #thenApply(AsyncFunction) + * @see #thenConsume(AsyncConsumer) + * @see #onErrorIf(Predicate, AsyncFunction) * @param callback the callback provided by the method the chain is used in */ default void finish(final SingleResultCallback callback) { - final boolean[] callbackInvoked = {false}; + final AtomicBoolean callbackInvoked = new AtomicBoolean(false); try { this.unsafeFinish((v, e) -> { - callbackInvoked[0] = true; + if (!callbackInvoked.compareAndSet(false, true)) { + throw new AssertionError(String.format("Callback has been already completed. 
It could happen " + + "if code throws an exception after invoking an async method. Value: %s", v), e); + } callback.onResult(v, e); }); } catch (Throwable t) { - if (callbackInvoked[0]) { + if (!callbackInvoked.compareAndSet(false, true)) { throw t; } else { callback.completeExceptionally(t); @@ -80,9 +88,9 @@ default void finish(final SingleResultCallback callback) { */ default AsyncSupplier thenApply(final AsyncFunction function) { return (c) -> { - this.unsafeFinish((v, e) -> { + this.finish((v, e) -> { if (e == null) { - function.unsafeFinish(v, c); + function.finish(v, c); } else { c.completeExceptionally(e); } @@ -99,7 +107,7 @@ default AsyncRunnable thenConsume(final AsyncConsumer consumer) { return (c) -> { this.unsafeFinish((v, e) -> { if (e == null) { - consumer.unsafeFinish(v, c); + consumer.finish(v, c); } else { c.completeExceptionally(e); } @@ -131,7 +139,7 @@ default AsyncSupplier onErrorIf( return; } if (errorMatched) { - errorFunction.unsafeFinish(e, callback); + errorFunction.finish(e, callback); } else { callback.completeExceptionally(e); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java index 98e43fe5fbe..de12e5f092f 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java @@ -610,6 +610,7 @@ private void sendCommandMessageAsync(final int messageId, final Decoder d return; } assertNotNull(responseBuffers); + T commandResult; try { updateSessionContext(operationContext.getSessionContext(), responseBuffers); boolean commandOk = @@ -624,13 +625,14 @@ private void sendCommandMessageAsync(final int messageId, final Decoder d } commandEventSender.sendSucceededEvent(responseBuffers); - T result1 = getCommandResult(decoder, responseBuffers, messageId, operationContext.getTimeoutContext()); - callback.onResult(result1, null); + commandResult = getCommandResult(decoder, responseBuffers, messageId, operationContext.getTimeoutContext()); } catch (Throwable localThrowable) { callback.onResult(null, localThrowable); + return; } finally { responseBuffers.close(); } + callback.onResult(commandResult, null); })); } }); diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java index ee509873e40..6fca357b080 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java @@ -101,7 +101,14 @@ public void startHandshakeAsync(final InternalConnection internalConnection, fin callback.onResult(null, t instanceof MongoException ? 
mapHelloException((MongoException) t) : t); } else { setSpeculativeAuthenticateResponse(helloResult); - callback.onResult(createInitializationDescription(helloResult, internalConnection, startTime), null); + InternalConnectionInitializationDescription initializationDescription; + try { + initializationDescription = createInitializationDescription(helloResult, internalConnection, startTime); + } catch (Throwable localThrowable) { + callback.onResult(null, localThrowable); + return; + } + callback.onResult(initializationDescription, null); } }); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java similarity index 97% rename from driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java rename to driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java index 20553fe881a..611b90fc675 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java @@ -25,10 +25,10 @@ import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.async.AsyncRunnable.beginAsync; -import static org.junit.jupiter.api.Assertions.assertThrows; -final class AsyncFunctionsTest extends AsyncFunctionsTestAbstract { +abstract class AsyncFunctionsAbstractTest extends AsyncFunctionsTestBase { private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0L, 0)); + @Test void test1Method() { // the number of expected variations is often: 1 + N methods invoked @@ -760,25 +760,6 @@ void testVariables() { }); } - @Test - void testInvalid() { - setIsTestingAbruptCompletion(false); - setAsyncStep(true); - assertThrows(IllegalStateException.class, () -> { - beginAsync().thenRun(c -> { - async(3, c); - throw new IllegalStateException("must not cause second callback invocation"); - }).finish((v, e) -> {}); - }); - assertThrows(IllegalStateException.class, () -> { - beginAsync().thenRun(c -> { - async(3, c); - }).finish((v, e) -> { - throw new IllegalStateException("must not cause second callback invocation"); - }); - }); - } - @Test void testDerivation() { // Demonstrates the progression from nested async to the API. 
@@ -866,5 +847,4 @@ void testDerivation() { }).finish(callback); }); } - } diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java similarity index 80% rename from driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java rename to driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java index 7cc8b456f1c..1229dbcfcad 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestAbstract.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java @@ -17,11 +17,17 @@ package com.mongodb.internal.async; import com.mongodb.client.TestListener; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.opentest4j.AssertionFailedError; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Supplier; @@ -31,11 +37,12 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -public class AsyncFunctionsTestAbstract { +public abstract class AsyncFunctionsTestBase { private final TestListener listener = new TestListener(); private final InvocationTracker invocationTracker = new InvocationTracker(); private boolean isTestingAbruptCompletion = false; + private ExecutorService asyncExecutor; void setIsTestingAbruptCompletion(final boolean b) { isTestingAbruptCompletion = b; @@ -53,6 +60,23 @@ public void listenerAdd(final String s) { listener.add(s); } + /** + * Create an executor service for async operations before each test. + * + * @return the executor service. + */ + public abstract ExecutorService createAsyncExecutor(); + + @BeforeEach + public void setUp() { + asyncExecutor = createAsyncExecutor(); + } + + @AfterEach + public void shutDown() { + asyncExecutor.shutdownNow(); + } + void plain(final int i) { int cur = invocationTracker.getNextOption(2); if (cur == 0) { @@ -98,32 +122,47 @@ Integer syncReturns(final int i) { return affectedReturns(i); } + + public void submit(final Runnable task) { + asyncExecutor.execute(task); + } void async(final int i, final SingleResultCallback callback) { assertTrue(invocationTracker.isAsyncStep); if (isTestingAbruptCompletion) { + /* We should not test for abrupt completion in a separate thread. Once a callback is registered for an async operation, + the Async Framework does not handle exceptions thrown outside of callbacks by the executing thread. Such exception management + should be the responsibility of the thread conducting the asynchronous operations. 
*/ affected(i); - callback.complete(callback); - - } else { - try { - affected(i); + submit(() -> { callback.complete(callback); - } catch (Throwable t) { - callback.onResult(null, t); - } + }); + } else { + submit(() -> { + try { + affected(i); + callback.complete(callback); + } catch (Throwable t) { + callback.onResult(null, t); + } + }); } } void asyncReturns(final int i, final SingleResultCallback callback) { assertTrue(invocationTracker.isAsyncStep); if (isTestingAbruptCompletion) { - callback.complete(affectedReturns(i)); + int result = affectedReturns(i); + submit(() -> { + callback.complete(result); + }); } else { - try { - callback.complete(affectedReturns(i)); - } catch (Throwable t) { - callback.onResult(null, t); - } + submit(() -> { + try { + callback.complete(affectedReturns(i)); + } catch (Throwable t) { + callback.onResult(null, t); + } + }); } } @@ -200,24 +239,26 @@ private void assertBehavesSame(final Supplier sync, final Runnable betwee AtomicReference actualValue = new AtomicReference<>(); AtomicReference actualException = new AtomicReference<>(); - AtomicBoolean wasCalled = new AtomicBoolean(false); + CompletableFuture wasCalledFuture = new CompletableFuture<>(); try { async.accept((v, e) -> { actualValue.set(v); actualException.set(e); - if (wasCalled.get()) { + if (wasCalledFuture.isDone()) { fail(); } - wasCalled.set(true); + wasCalledFuture.complete(null); }); } catch (Throwable e) { fail("async threw instead of using callback"); } + await(wasCalledFuture, "Callback should have been called"); + // The following code can be used to debug variations: // System.out.println("===VARIATION START"); // System.out.println("sync: " + expectedEvents); -// System.out.println("callback called?: " + wasCalled.get()); +// System.out.println("callback called?: " + wasCalledFuture.isDone()); // System.out.println("value -- sync: " + expectedValue + " -- async: " + actualValue.get()); // System.out.println("excep -- sync: " + expectedException + " -- async: " + actualException.get()); // System.out.println("exception mode: " + (isTestingAbruptCompletion @@ -229,7 +270,7 @@ private void assertBehavesSame(final Supplier sync, final Runnable betwee throw (AssertionFailedError) actualException.get(); } - assertTrue(wasCalled.get(), "callback should have been called"); + assertTrue(wasCalledFuture.isDone(), "callback should have been called"); assertEquals(expectedEvents, listener.getEventStrings(), "steps should have matched"); assertEquals(expectedValue, actualValue.get()); assertEquals(expectedException == null, actualException.get() == null, @@ -242,6 +283,14 @@ private void assertBehavesSame(final Supplier sync, final Runnable betwee listener.clear(); } + protected T await(final CompletableFuture voidCompletableFuture, final String errorMessage) { + try { + return voidCompletableFuture.get(1, TimeUnit.MINUTES); + } catch (InterruptedException | ExecutionException | TimeoutException e) { + throw new AssertionError(errorMessage); + } + } + /** * Tracks invocations: allows testing of all variations of a method calls */ diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java new file mode 100644 index 00000000000..04b9290af55 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.async; + +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.AbstractExecutorService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@DisplayName("The same thread async functions") +public class SameThreadAsyncFunctionsTest extends AsyncFunctionsAbstractTest { + @Override + public ExecutorService createAsyncExecutor() { + return new SameThreadExecutorService(); + } + + @Test + void testInvalid() { + setIsTestingAbruptCompletion(false); + setAsyncStep(true); + IllegalStateException illegalStateException = new IllegalStateException("must not cause second callback invocation"); + + assertThrows(IllegalStateException.class, () -> { + beginAsync().thenRun(c -> { + async(3, c); + throw illegalStateException; + }).finish((v, e) -> { + assertNotEquals(e, illegalStateException); + }); + }); + assertThrows(IllegalStateException.class, () -> { + beginAsync().thenRun(c -> { + async(3, c); + }).finish((v, e) -> { + throw illegalStateException; + }); + }); + } + + private static class SameThreadExecutorService extends AbstractExecutorService { + @Override + public void execute(@NotNull final Runnable command) { + command.run(); + } + + @Override + public void shutdown() { + } + + @NotNull + @Override + public List shutdownNow() { + return Collections.emptyList(); + } + + @Override + public boolean isShutdown() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isTerminated() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean awaitTermination(final long timeout, @NotNull final TimeUnit unit) { + return true; + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java new file mode 100644 index 00000000000..401c4d2c18e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.async; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@DisplayName("Separate thread async functions") +public class SeparateThreadAsyncFunctionsTest extends AsyncFunctionsAbstractTest { + + private UncaughtExceptionHandler uncaughtExceptionHandler; + + @Override + public ExecutorService createAsyncExecutor() { + uncaughtExceptionHandler = new UncaughtExceptionHandler(); + return Executors.newFixedThreadPool(1, r -> { + Thread thread = new Thread(r); + thread.setUncaughtExceptionHandler(uncaughtExceptionHandler); + return thread; + }); + } + + /** + * This test covers the scenario where a callback is erroneously invoked after a callback had been completed. + * Such behavior is considered a bug and is not expected. An AssertionError should be thrown if an asynchronous invocation + * attempts to use a callback that has already been marked as completed. + */ + @Test + void shouldPropagateAssertionErrorIfCallbackHasBeenCompletedAfterAsyncInvocation() { + //given + setIsTestingAbruptCompletion(false); + setAsyncStep(true); + IllegalStateException illegalStateException = new IllegalStateException("must not cause second callback invocation"); + AtomicBoolean callbackInvoked = new AtomicBoolean(false); + + //when + beginAsync().thenRun(c -> { + async(3, c); + throw illegalStateException; + }).thenRun(c -> { + assertInvokedOnce(callbackInvoked); + c.complete(c); + }) + .finish((v, e) -> { + assertEquals(illegalStateException, e); + } + ); + + //then + Throwable exception = uncaughtExceptionHandler.getException(); + assertNotNull(exception); + assertEquals(AssertionError.class, exception.getClass()); + assertEquals("Callback has been already completed. It could happen " + + "if code throws an exception after invoking an async method. 
Value: null", exception.getMessage()); + } + + @Test + void shouldPropagateUnexpectedExceptionFromFinishCallback() { + //given + setIsTestingAbruptCompletion(false); + setAsyncStep(true); + IllegalStateException illegalStateException = new IllegalStateException("must not cause second callback invocation"); + + //when + beginAsync().thenRun(c -> { + async(3, c); + }).finish((v, e) -> { + throw illegalStateException; + }); + + //then + Throwable exception = uncaughtExceptionHandler.getException(); + assertNotNull(exception); + assertEquals(illegalStateException, exception); + } + + private static void assertInvokedOnce(final AtomicBoolean callbackInvoked1) { + assertTrue(callbackInvoked1.compareAndSet(false, true)); + } + + private final class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { + + private final CompletableFuture completable = new CompletableFuture<>(); + + @Override + public void uncaughtException(final Thread t, final Throwable e) { + completable.complete(e); + } + + public Throwable getException() { + return await(completable, "No exception was thrown"); + } + } +} From 634fd094992f09cf39deec0ea2afeb12b4db61ef Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Thu, 22 Aug 2024 19:14:10 -0700 Subject: [PATCH 69/90] Add exact vector search option. (#1473) JAVA-5557 --------- Co-authored-by: Valentin Kovalenko --- .../com/mongodb/client/model/Aggregates.java | 35 +------- .../com/mongodb/client/model/Projections.java | 2 +- .../ApproximateVectorSearchOptions.java | 37 +++++++++ .../search/ExactVectorSearchOptions.java | 38 +++++++++ .../search/VectorSearchConstructibleBson.java | 3 +- .../model/search/VectorSearchOptions.java | 27 +++++-- .../client/model/search/package-info.java | 3 +- .../AggregatesSearchIntegrationTest.java | 42 +++++++--- .../model/AggregatesSpecification.groovy | 23 +++--- .../model/search/VectorSearchOptionsTest.java | 80 ++++++++++++++----- .../org/mongodb/scala/model/Aggregates.scala | 29 +------ .../model/search/VectorSearchOptions.scala | 17 +++- .../mongodb/scala/model/search/package.scala | 29 ++++++- .../mongodb/scala/model/AggregatesSpec.scala | 19 ++--- 14 files changed, 256 insertions(+), 128 deletions(-) create mode 100644 driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java create mode 100644 driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java diff --git a/driver-core/src/main/com/mongodb/client/model/Aggregates.java b/driver-core/src/main/com/mongodb/client/model/Aggregates.java index 53e9e1eaf52..152cacc659b 100644 --- a/driver-core/src/main/com/mongodb/client/model/Aggregates.java +++ b/driver-core/src/main/com/mongodb/client/model/Aggregates.java @@ -52,7 +52,6 @@ import static com.mongodb.client.model.GeoNearOptions.geoNearOptions; import static com.mongodb.client.model.densify.DensifyOptions.densifyOptions; import static com.mongodb.client.model.search.SearchOptions.searchOptions; -import static com.mongodb.client.model.search.VectorSearchOptions.vectorSearchOptions; import static com.mongodb.internal.Iterables.concat; import static com.mongodb.internal.client.model.Util.sizeAtLeast; import static java.util.Arrays.asList; @@ -947,42 +946,13 @@ public static Bson searchMeta(final SearchCollector collector, final SearchOptio * @param queryVector The query vector. The number of dimensions must match that of the {@code index}. * @param path The field to be searched. * @param index The name of the index to use. 
- * @param numCandidates The number of candidates. - * @param limit The limit on the number of documents produced by the pipeline stage. - * @return The {@code $vectorSearch} pipeline stage. - * - * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch - * @mongodb.atlas.manual atlas-search/scoring/ Scoring - * @mongodb.server.release 6.0.10 - * @since 4.11 - */ - @Beta(Reason.SERVER) - public static Bson vectorSearch( - final FieldSearchPath path, - final Iterable queryVector, - final String index, - final long numCandidates, - final long limit) { - return vectorSearch(notNull("path", path), notNull("queryVector", queryVector), notNull("index", index), numCandidates, limit, - vectorSearchOptions()); - } - - /** - * Creates a {@code $vectorSearch} pipeline stage supported by MongoDB Atlas. - * You may use the {@code $meta: "vectorSearchScore"} expression, e.g., via {@link Projections#metaVectorSearchScore(String)}, - * to extract the relevance score assigned to each found document. - * - * @param queryVector The query vector. The number of dimensions must match that of the {@code index}. - * @param path The field to be searched. - * @param index The name of the index to use. - * @param numCandidates The number of candidates. * @param limit The limit on the number of documents produced by the pipeline stage. * @param options Optional {@code $vectorSearch} pipeline stage fields. * @return The {@code $vectorSearch} pipeline stage. * * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch * @mongodb.atlas.manual atlas-search/scoring/ Scoring - * @mongodb.server.release 6.0.10 + * @mongodb.server.release 6.0.11 * @since 4.11 */ @Beta(Reason.SERVER) @@ -990,7 +960,6 @@ public static Bson vectorSearch( final FieldSearchPath path, final Iterable queryVector, final String index, - final long numCandidates, final long limit, final VectorSearchOptions options) { notNull("path", path); @@ -1003,7 +972,6 @@ public BsonDocument toBsonDocument(final Class documentCl Document specificationDoc = new Document("path", path.toValue()) .append("queryVector", queryVector) .append("index", index) - .append("numCandidates", numCandidates) .append("limit", limit); specificationDoc.putAll(options.toBsonDocument(documentClass, codecRegistry)); return new Document("$vectorSearch", specificationDoc).toBsonDocument(documentClass, codecRegistry); @@ -1015,7 +983,6 @@ public String toString() { + ", path=" + path + ", queryVector=" + queryVector + ", index=" + index - + ", numCandidates=" + numCandidates + ", limit=" + limit + ", options=" + options + '}'; diff --git a/driver-core/src/main/com/mongodb/client/model/Projections.java b/driver-core/src/main/com/mongodb/client/model/Projections.java index 98fd2810ed5..18cda97c62b 100644 --- a/driver-core/src/main/com/mongodb/client/model/Projections.java +++ b/driver-core/src/main/com/mongodb/client/model/Projections.java @@ -215,7 +215,7 @@ public static Bson metaSearchScore(final String fieldName) { /** * Creates a projection to the given field name of the vectorSearchScore, - * for use with {@link Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, long, VectorSearchOptions)}. + * for use with {@link Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions)} . * Calling this method is equivalent to calling {@link #meta(String, String)} with {@code "vectorSearchScore"} as the second argument. 
* * @param fieldName the field name diff --git a/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java new file mode 100644 index 00000000000..04faa18769e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; + +/** + * Represents optional fields of the {@code $vectorSearch} pipeline stage of an aggregation pipeline. + *

      + * Configures approximate vector search for Atlas Vector Search to enable searches that may not return the exact closest vectors. + * + * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) + * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch + * @mongodb.server.release 6.0.11, 7.0.2 + * @since 5.2 + */ +@Sealed +@Beta(Reason.SERVER) +public interface ApproximateVectorSearchOptions extends VectorSearchOptions { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java new file mode 100644 index 00000000000..ff8bf01e956 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; + +/** + * Represents optional fields of the {@code $vectorSearch} pipeline stage of an aggregation pipeline. + *

      + * Configures exact vector search for Atlas Vector Search to enable precise matching, ensuring that + * results are the closest vectors to a given query vector. + * + * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) + * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch + * @mongodb.server.release 6.0.16, 7.0.10, 7.3.2 + * @since 5.2 + */ +@Sealed +@Beta(Reason.SERVER) +public interface ExactVectorSearchOptions extends VectorSearchOptions { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java index b9a2f806744..3e281890822 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java +++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java @@ -23,7 +23,8 @@ import static com.mongodb.assertions.Assertions.notNull; -final class VectorSearchConstructibleBson extends AbstractConstructibleBson implements VectorSearchOptions { +final class VectorSearchConstructibleBson extends AbstractConstructibleBson + implements ApproximateVectorSearchOptions, ExactVectorSearchOptions { /** * An {@linkplain Immutable immutable} {@link BsonDocument#isEmpty() empty} instance. */ diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java index df3607d039b..f27a4a2828b 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java @@ -25,9 +25,9 @@ /** * Represents optional fields of the {@code $vectorSearch} pipeline stage of an aggregation pipeline. * - * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, long, VectorSearchOptions) + * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch - * @mongodb.server.release 6.0.10 + * @mongodb.server.release 6.0.11 * @since 4.11 */ @Sealed @@ -37,7 +37,7 @@ public interface VectorSearchOptions extends Bson { * Creates a new {@link VectorSearchOptions} with the filter specified. * * @param filter A filter that is applied before applying the - * {@link Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, long, VectorSearchOptions) queryVector}. + * {@link Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) queryVector} * One may use {@link Filters} to create this filter, though not all filters may be supported. * See the MongoDB documentation for the list of supported filters. * @return A new {@link VectorSearchOptions}. @@ -66,11 +66,24 @@ public interface VectorSearchOptions extends Bson { VectorSearchOptions option(String name, Object value); /** - * Returns {@link VectorSearchOptions} that represents server defaults. + * Returns {@link ApproximateVectorSearchOptions} that represents server defaults. * - * @return {@link VectorSearchOptions} that represents server defaults. + * @param numCandidates The number of candidates. + * @return {@link ApproximateVectorSearchOptions} that represents server defaults. 
+ * @since 5.2 */ - static VectorSearchOptions vectorSearchOptions() { - return VectorSearchConstructibleBson.EMPTY_IMMUTABLE; + static ApproximateVectorSearchOptions approximateVectorSearchOptions(long numCandidates) { + return (ApproximateVectorSearchOptions) VectorSearchConstructibleBson.EMPTY_IMMUTABLE.option("numCandidates", numCandidates); + } + + /** + * Returns {@link ExactVectorSearchOptions} that represents server defaults with the {@code exact} option set to true. + * + * @return {@link ExactVectorSearchOptions} that represents server defaults. + * @since 5.2 + */ + static ExactVectorSearchOptions exactVectorSearchOptions() { + return (ExactVectorSearchOptions) VectorSearchConstructibleBson.EMPTY_IMMUTABLE + .option("exact", true); } } diff --git a/driver-core/src/main/com/mongodb/client/model/search/package-info.java b/driver-core/src/main/com/mongodb/client/model/search/package-info.java index c3664cb5560..e04257df29c 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/package-info.java +++ b/driver-core/src/main/com/mongodb/client/model/search/package-info.java @@ -25,7 +25,8 @@ * * @see com.mongodb.client.model.Aggregates#search(SearchOperator, SearchOptions) * @see com.mongodb.client.model.Aggregates#search(SearchCollector, SearchOptions) - * @see com.mongodb.client.model.Aggregates#vectorSearch(FieldSearchPath, java.lang.Iterable, java.lang.String, long, long, VectorSearchOptions) + * @see com.mongodb.client.model.Aggregates#vectorSearch(com.mongodb.client.model.search.FieldSearchPath, java.lang.Iterable, java.lang.String, + * long, com.mongodb.client.model.search.VectorSearchOptions) * @mongodb.atlas.manual atlas-search/ Atlas Search * @mongodb.atlas.manual atlas-search/query-syntax/ Atlas Search aggregation pipeline stages * @since 4.7 diff --git a/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java b/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java index b67cf37af93..29de80dda32 100644 --- a/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java +++ b/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java @@ -28,7 +28,6 @@ import org.bson.json.JsonWriterSettings; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -98,7 +97,8 @@ import static com.mongodb.client.model.search.SearchScoreExpression.multiplyExpression; import static com.mongodb.client.model.search.SearchScoreExpression.pathExpression; import static com.mongodb.client.model.search.SearchScoreExpression.relevanceExpression; -import static com.mongodb.client.model.search.VectorSearchOptions.vectorSearchOptions; +import static com.mongodb.client.model.search.VectorSearchOptions.approximateVectorSearchOptions; +import static com.mongodb.client.model.search.VectorSearchOptions.exactVectorSearchOptions; import static java.time.ZoneOffset.UTC; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; @@ -227,6 +227,7 @@ final class AggregatesSearchIntegrationTest { private static final MongoNamespace MFLIX_EMBEDDED_MOVIES_NS = new MongoNamespace("sample_mflix", "embedded_movies"); private static final MongoNamespace AIRBNB_LISTINGS_AND_REVIEWS_NS = new 
MongoNamespace("sample_airbnb", "listingsAndReviews"); private static final List QUERY_VECTOR = unmodifiableList(asList(-0.0072121937, -0.030757688, 0.014948666, -0.018497631, -0.019035352, 0.028149737, -0.0019593239, -0.02012424, -0.025649332, -0.007985169, 0.007830574, 0.023726976, -0.011507247, -0.022839734, 0.00027999343, -0.010431803, 0.03823202, -0.025756875, -0.02074262, -0.0042883316, -0.010841816, 0.010552791, 0.0015266258, -0.01791958, 0.018430416, -0.013980767, 0.017247427, -0.010525905, 0.0126230195, 0.009255537, 0.017153326, 0.008260751, -0.0036060968, -0.019210111, -0.0133287795, -0.011890373, -0.0030599732, -0.0002904958, -0.001310697, -0.020715732, 0.020890493, 0.012428096, 0.0015837587, -0.006644225, -0.028499257, -0.005098275, -0.0182691, 0.005760345, -0.0040665213, 0.00075491105, 0.007844017, 0.00040791242, 0.0006780336, 0.0027037326, -0.0041370974, -0.022275126, 0.004775642, -0.0045235846, -0.003659869, -0.0020567859, 0.021602973, 0.01010917, -0.011419867, 0.0043689897, -0.0017946466, 0.000101610516, -0.014061426, -0.002626435, -0.00035540052, 0.0062174085, 0.020809835, 0.0035220778, -0.0071046497, -0.005041142, 0.018067453, 0.012569248, -0.021683631, 0.020245226, 0.017247427, 0.017032338, 0.01037131, -0.036296222, -0.026334926, 0.041135717, 0.009625221, 0.032155763, -0.025057837, 0.027827105, -0.03323121, 0.0055721425, 0.005716655, 0.01791958, 0.012078577, -0.011117399, -0.0016005626, -0.0033254733, -0.007702865, 0.034306653, 0.0063854465, -0.009524398, 0.006069535, 0.012696956, -0.0042883316, -0.013167463, -0.0024667988, -0.02356566, 0.00052721944, -0.008858967, 0.039630096, -0.0064593833, -0.0016728189, -0.0020366213, 0.00622413, -0.03739855, 0.0028616884, -0.0102301575, 0.017717933, -0.0041068504, -0.0060896995, -0.01876649, 0.0069903834, 0.025595559, 0.029762903, -0.006388807, 0.017247427, 0.0022080203, -0.029117636, -0.029870447, -0.0049739266, -0.011809715, 0.023243025, 0.009510955, 0.030004878, 0.0015837587, -0.018524516, 0.007931396, -0.03589293, 0.013590919, -0.026361812, 0.002922182, 0.025743432, 0.014894894, 0.0012989342, -0.0016232478, 0.006251016, 0.029789789, -0.004664737, 0.017812036, -0.013436324, -0.0102301575, 0.016884465, -0.017220542, 0.010156221, 0.00014503786, 0.03933435, 0.018658947, 0.016897907, 0.0076961434, -0.029843561, -0.02021834, 0.015056211, 0.01002179, -0.0031994449, -0.03796316, -0.008133043, 0.03707592, 0.032128878, 9.483648E-05, 0.0017627194, -0.0007544909, 0.006647586, 0.020903936, -0.032559056, 0.025272924, -0.012804501, 0.019210111, 0.0022987607, 0.013301893, -0.0047218697, -0.022853177, -0.02162986, 0.006788738, 0.0092286505, 0.024184039, -0.015419173, -0.006479548, -0.00180977, 0.0060728956, -0.0030919004, 0.0022449887, -0.004046357, 0.012663349, -0.028579915, 0.0047722813, -0.6775295, -0.018779935, -0.018484188, -0.017449073, -0.01805401, 0.026630674, 0.008018777, 0.013436324, -0.0034683058, 0.00070912065, -0.005027699, 0.009658828, -0.0031792803, -0.010478854, 0.0034951917, -0.011594627, 0.02441257, -0.042533796, -0.012414653, 0.006261098, -0.012266779, 0.026630674, -0.017852364, -0.02184495, 0.02176429, 0.019263884, 0.00984031, -0.012609577, -0.01907568, -0.020231783, -0.002886894, 0.02706085, -0.0042345594, 0.02265153, 0.05769755, 0.021522315, -0.014195856, 0.011144285, 0.0038077426, 0.024573887, -0.03578539, -0.004476534, 0.016521502, -0.019815048, 0.00071836275, 0.008173372, 0.013436324, 0.021885278, -0.0147604635, -0.021777734, 0.0052595916, -0.011668564, -0.02356566, -0.0049974523, 0.03473683, -0.0255149, 0.012831387, 
-0.009658828, -0.0031036632, -0.001386314, -0.01385978, 0.008294359, -0.02512505, -0.0012308789, 0.008711093, 0.03610802, 0.016225755, 0.014034539, 0.0032431346, -0.017852364, 0.017906137, 0.005787231, -0.03514012, 0.017207097, -0.0019542826, -0.010189828, 0.010808208, -0.017408744, -0.0074944976, 0.011009854, 0.00887241, 0.009652107, -0.0062409337, 0.009766373, 0.009759651, -0.0020819916, -0.02599885, 0.0040665213, 0.016064439, -0.019035352, -0.013604362, 0.020231783, -0.025272924, -0.01196431, -0.01509654, 0.0010233518, -0.00869765, -0.01064017, 0.005249509, -0.036807057, 0.00054570363, 0.0021777733, -0.009302587, -0.00039362916, 0.011386259, 0.013382551, 0.03046194, 0.0032380936, 0.037801843, -0.036807057, -0.006244295, 0.002392862, -0.01346321, -0.008953068, -0.0025861058, -0.022853177, 0.018242212, -0.0031624765, 0.009880639, -0.0017341529, 0.0072054723, 0.014693249, 0.026630674, 0.008435511, -0.012562525, 0.011581183, -0.0028768117, -0.01059312, -0.027746446, 0.0077969665, 2.468059E-05, -0.011151006, 0.0152712995, -0.01761039, 0.023256468, 0.0076625356, 0.0026163526, -0.028795004, 0.0025877862, -0.017583502, -0.016588718, 0.017556617, 0.00075491105, 0.0075885993, -0.011722336, -0.010620005, -0.017274313, -0.008025498, -0.036376882, 0.009457182, -0.007265966, -0.0048663826, -0.00494368, 0.003616179, 0.0067820163, 0.0033775652, -0.016037554, 0.0043320213, -0.007978448, -0.012925488, 0.029413383, -0.00016583256, -0.018040568, 0.004180787, -0.011453475, -0.013886666, -0.0072121937, 0.006486269, 0.008005333, -0.01412864, -0.00061796, -0.025635887, -0.006630782, 0.02074262, -0.007192029, 0.03906549, -0.0030885397, -0.00088976155, -0.022033151, -0.008758144, 0.00049361185, 0.009342916, -0.014988995, -0.008704372, 0.014276514, -0.012300386, -0.0020063745, 0.030892119, -0.010532626, 0.019653732, 0.0028583275, 0.006163636, 0.0071517, -0.017489402, -0.008448954, -0.004352186, 0.013201071, 0.01090231, 0.0004110631, 0.03306989, 0.006916447, 0.002922182, 0.023888292, -0.009067334, 0.012434817, -0.051298663, 0.016279528, -0.02741037, 0.026227381, -0.005182294, 0.008153207, -0.026603786, 0.0045571923, 0.018067453, 0.038016934, 0.028042194, 0.0077431942, 0.015499831, -0.020298999, 0.0013123773, -0.021334114, -0.026281154, -0.0012720482, -0.0045571923, 0.006086339, 0.0028952959, -0.003041489, 0.007931396, -0.0005406625, -0.023444671, -0.0038715971, 0.0070374343, -0.0019979726, 0.024089938, 0.0020903936, -0.024210924, 0.007319738, -0.005995598, 0.032478396, 0.020998036, 0.01654839, 0.033876475, 0.025098165, 0.021132467, -0.017099554, -0.013516982, 0.01306664, 0.010525905, -0.02335057, -0.013543868, -0.03583916, 0.021172797, -0.033607613, -0.0036094578, -0.007911232, -0.0054578763, 0.013227956, 0.00993441, 0.025810648, 0.02255743, -0.013678298, 0.012273501, 0.00040497174, 0.0019072321, 0.0008170851, 0.01540573, 0.015580489, 0.005239427, 0.003989224, -0.013254843, 0.024708318, 0.0046680975, -0.034360424, -0.0041942303, 0.0077095865, -0.0053503322, -0.024399128, -0.02644247, 0.0062476555, 0.021885278, -0.0010922474, -0.014209299, 0.018295985, 0.0135640325, 0.0033842868, 0.0017812036, 0.004735313, 0.006486269, -0.008072549, 0.009551284, 0.007938119, 0.0101696635, 0.021750847, 0.014034539, 0.0071449787, -0.008448954, 0.010841816, -0.008274195, -0.014531932, -0.0024785616, 0.0018601815, 0.009564727, -0.011130841, -0.020581303, 0.012985982, 0.019976366, -0.030542599, -0.021818062, -0.018551402, -0.0092286505, -0.024385685, 0.0036901159, -0.0061367503, -0.00034048714, -0.007057599, -0.014558818, -0.022221355, 
0.023377456, 0.026119838, -0.0008813597, 0.004520224, 0.0027843907, -0.022382671, 0.0018248934, 0.13313992, 0.013685021, -6.170148E-05, 0.015876237, 0.005417547, -0.008314524, -0.019169783, -0.016494617, 0.016844137, -0.0046412116, 0.024305027, -0.027827105, 0.023162367, 0.0143034, -0.0029893972, -0.014626034, -0.018215327, 0.0073264595, 0.024331912, -0.0070777633, -0.0004259765, -0.00042345593, -0.0034262962, -0.00423792, -0.016185427, -0.017946465, -5.9706024E-05, 0.016467731, -0.014773907, -0.022664975, -0.009322752, -0.027585128, 0.0020651878, -0.010532626, -0.010546069, 0.009174879, -0.0011098915, 0.026469355, 0.022006266, -0.013039754, 0.023458114, 0.005481402, -0.00050705485, -0.012092019, 0.0055990284, -0.007057599, -0.012266779, 0.03253217, 0.007071042, -0.01699201, 0.06597847, -0.013436324, 0.0070038266, -0.009981461, 0.024829306, 0.0067383265, 0.0056292755, 0.0018534599, -0.020057024, 0.011735778, 0.0025491375, -0.022194467, 0.0012468424, -0.0051621296, -0.018457301, -0.008509448, -0.011594627, -0.0152712995, -0.001858501, -0.014921781, -0.0056696045, -0.0066979975, -0.02008391, 0.0040093884, 0.032935463, -0.0032935461, -0.0074205613, -0.014088311, -0.0014762144, -0.011218221, 0.011984475, -0.01898158, -0.027208723, -0.008072549, 0.010942639, 0.0183632, 0.04148524, -0.0009922648, -0.017086111, 0.013483374, 0.019841935, 0.024264697, 0.011601348, -0.0077431942, -0.020258669, -0.005770427, 0.013429603, -0.011554297, -0.012831387, -1.4752561E-06, 0.011594627, -0.012683514, -0.012824666, 0.02180462, 0.011023297, 0.012468425, -0.0029860365, -0.0076289284, -0.021293784, 0.005068028, 0.017812036, 0.0007708746, -0.008684208, 0.0048126103, -0.0076558143, 0.019169783, -0.0076558143, 0.028579915, -0.011574462, -0.03196756, -0.0011334168, -0.030219967, 0.023901735, 0.014021097, -0.016776921, 0.0030045207, -0.0019257163, -0.023579102, 0.004197591, 0.00012497831, -0.016803807, 0.01915634, -0.010472132, -0.042130504, -0.038016934, -0.007702865, -0.0025861058, -0.010512462, -0.013537147, -0.013382551, -0.0036397045, 0.0053032814, 0.0046277684, -0.021952493, -0.016588718, -0.031886905, 0.0058208387, -0.00043689896, -0.01337583, 0.018349757, 0.015244413, 0.00900684, -0.017677605, 0.01523097, 0.010337702, -0.024426013, -0.021965936, -0.014182413, 0.008596827, 0.029628472, 0.058611676, -0.015446059, 0.021374442, -0.0095042335, 0.00091748784, 0.021132467, -0.011285436, -0.0035724894, -0.027907763, 0.027302826, 0.004184148, 0.026281154, -0.0026802071, -0.015163755, 0.005699851, 0.023122039, 0.0075415485, -0.020057024, -0.0109359175, -0.018309427, 0.017529732, 0.0020685487, -0.012441538, 0.0023239665, 0.012038247, -0.017543174, 0.029332725, 0.01399421, -0.0092488155, -1.0607403E-05, 0.019371428, -0.0315105, 0.023471557, -0.009430297, 0.00022097006, 0.013301893, -0.020110795, -0.0072928523, 0.007649093, 0.011547576, 0.026805433, -0.01461259, -0.018968137, -0.0104250815, 0.0005646079, 0.031456728, -0.0020147765, -0.024224367, 0.002431511, -0.019371428, -0.025017507, -0.02365976, -0.004318578, -0.04457714, 0.0029826758, -0.020473758, -0.016118212, -0.00068181445, -0.03446797, -0.020715732, -0.04256068, -0.013792564, 0.013873223, 0.011413146, -0.002419748, 0.0123877665, -0.0011115718, 0.007978448, 0.021441657, 0.004405958, 0.0042480025, 0.022920392, -0.0067920987, 0.011083791, -0.017529732, -0.03659197, -0.0066005355, -0.023888292, -0.016521502, 0.009591613, -0.0008590946, 0.013846337, -0.021092137, -0.012562525, -0.0028415236, 0.02882189, 5.3378342E-05, -0.006943333, -0.012226449, -0.035570297, 
-0.024547001, 0.022355784, -0.018416973, 0.014209299, 0.010035234, 0.0046916227, 0.009672271, -0.00067635323, -0.024815861, 0.0007049197, 0.0017055863, -0.0051251613, 0.0019391594, 0.027665788, -0.007306295, -0.013369109, 0.006308149, 0.009699157, 0.000940173, 0.024842748, 0.017220542, -0.0053032814, -0.008395182, 0.011359373, 0.013214514, 0.0062711807, 0.004110211, -0.019277327, -0.01412864, -0.009322752, 0.007124814, 0.0035119955, -0.024036165, -0.012831387, -0.006734966, -0.0019694061, -0.025367027, -0.006630782, 0.016010666, 0.0018534599, -0.0030717358, -0.017717933, 0.008489283, 0.010875423, -0.0028700903, 0.0121323485, 0.004930237, 0.009947853, -0.02992422, 0.021777734, 0.00015081417, 0.010344423, 0.0017543174, 0.006166997, -0.0015467904, 0.010089005, 0.0111711705, -0.010740994, -0.016965123, -0.006771934, 0.014464716, 0.007192029, -0.0006175399, -0.010855259, -0.003787578, 0.015647706, 0.01002179, -0.015378844, -0.01598378, 0.015741806, -0.0039119264, -0.008422068, 0.03253217, -0.019210111, -0.014975552, 0.0025810648, 0.0035556855, 8.449164E-05, -0.034172222, -0.006395529, -0.0036867552, 0.020769505, 0.009766373, -0.017543174, -0.013557311, 0.0031994449, -0.0014577302, 0.01832287, -0.009907524, -0.024654545, 0.0049940916, 0.016965123, 0.004476534, 0.022261683, -0.009369803, 0.0015308268, -0.010102449, -0.001209874, -0.023807634, -0.008348132, -0.020312442, 0.030892119, -0.0058309208, -0.005128522, -0.02437224, 0.01478735, -0.011016576, -0.010290652, -0.00503106, 0.016884465, 0.02132067, -0.014236185, -0.004903351, 0.01902191, 0.0028179984, 0.019505858, -0.021535758, -0.0038514326, 0.0112115, 0.0038682362, 0.003217929, -0.0012770894, -0.013685021, -0.008381739, 0.0025256122, 0.029386498, 0.018645504, 0.005323446, -0.0032784226, -0.0043253, 0.0007998612, 0.019949479, 0.025770318, -0.0030868594, 0.018968137, -0.010236879, -0.005370497, -0.024748646, -0.014047982, 0.005760345, -0.03610802, 0.0042009517, -0.0034817487, 0.003385967, 0.006560206, -0.006294706, -0.02400928, -0.006140111, -0.0017980073, -0.012481867, -0.0033960494, -0.00097210024, 0.014061426, -0.017596947, -0.023202697, 0.0028499255, -0.016010666, -0.028149737, 0.0024752007, -0.018941252, 0.0056158323, -0.012912045, 0.0054410724, 0.003054932, 0.019559631, -0.0048932685, -0.007823853, -0.017099554, 0.025662774, 0.02572999, 0.004379072, -0.010223436, 0.0031036632, -0.011755943, -0.025622444, -0.030623257, 0.019895706, -0.02052753, -0.006637504, -0.001231719, -0.013980767, -0.02706085, -0.012071854, -0.0041370974, -0.008885853, 0.0001885177, 0.2460615, -0.009389968, -0.010714107, 0.0326666, 0.0009561366, 0.022624645, 0.009793258, 0.019452088, -0.004493338, -0.007097928, -0.0022298652, 0.012401209, -0.0036229007, -0.00023819396, -0.017502844, -0.014209299, -0.030542599, -0.004863022, 0.005128522, -0.03081146, 0.02118624, -0.0042177555, 0.0032448152, -0.019936036, 0.015311629, 0.0070508774, -0.02021834, 0.0016148458, 0.04317906, 0.01385978, 0.004211034, -0.02534014, -0.00030309867, -0.011930703, -0.00207527, -0.021643303, 0.01575525, -0.0042883316, 0.0069231684, 0.017946465, 0.03081146, 0.0043857936, 3.646951E-05, -0.0214551, 0.0089933975, 0.022785962, -0.008106156, 0.00082884775, -0.0006717322, -0.0025457768, -0.017059224, -0.035113234, 0.054982055, 0.021266898, -0.0071046497, -0.012636462, 0.016965123, 0.01902191, -0.0061737187, 0.00076247274, 0.0002789432, 0.030112421, -0.0026768465, 0.0015207445, -0.004926876, 0.0067551304, -0.022624645, 0.0005003333, 0.0035523248, -0.0041337362, 0.011634956, -0.0183632, -0.02820351, 
-0.0061737187, -0.022355784, -0.03796316, 0.041888528, 0.019626847, 0.02211381, 0.001474534, 0.0037640526, 0.0085228905, 0.013140577, 0.012616298, -0.010599841, -0.022920392, 0.011278715, -0.011493804, -0.0044966987, -0.028741231, 0.015782135, -0.011500525, -0.00027621258, -0.0046378504, -0.003280103, 0.026993636, 0.0109359175, 0.027168395, 0.014370616, -0.011890373, -0.020648519, -0.03465617, 0.001964365, 0.034064677, -0.02162986, -0.01081493, 0.014397502, 0.008038941, 0.029789789, -0.012044969, 0.0038379894, -0.011245107, 0.0048193317, -0.0048563, 0.0142899575, 0.009779816, 0.0058510853, -0.026845763, 0.013281729, -0.0005818318, 0.009685714, -0.020231783, -0.004197591, 0.015593933, -0.016319858, -0.019492416, -0.008314524, 0.014693249, 0.013617805, -0.02917141, -0.0052058194, -0.0061838008, 0.0072726877, -0.010149499, -0.019035352, 0.0070374343, -0.0023138842, 0.0026583623, -0.00034111727, 0.0019038713, 0.025945077, -0.014693249, 0.009820145, -0.0037506097, 0.00041127318, -0.024909964, 0.008603549, -0.0041707046, 0.019398315, -0.024022723, -0.013409438, -0.027880875, 0.0023558936, -0.024237812, 0.034172222, -0.006251016, -0.048152987, -0.01523097, -0.002308843, -0.013691742, -0.02688609, 0.007810409, 0.011513968, -0.006647586, -0.011735778, 0.0017408744, -0.17422187, 0.01301959, 0.018860593, -0.00068013405, 0.008791751, -0.031618044, 0.017946465, 0.011735778, -0.03129541, 0.0033607613, 0.0072861305, 0.008227143, -0.018443858, -0.014007653, 0.009961297, 0.006284624, -0.024815861, 0.012676792, 0.014222742, 0.0036632298, 0.0028364826, -0.012320551, -0.0050478633, 0.011729057, 0.023135481, 0.025945077, 0.005676326, -0.007192029, 0.0015308268, -0.019492416, -0.008932903, -0.021737404, 0.012925488, 0.008092714, 0.03245151, -0.009457182, -0.018524516, 0.0025188907, -0.008569942, 0.0022769158, -0.004617686, 0.01315402, 0.024291582, -0.001880346, 0.0014274834, 0.04277577, 0.010216715, -0.018699275, 0.018645504, 0.008059106, 0.02997799, -0.021576088, 0.004846218, 0.015741806, 0.0023542133, 0.03142984, 0.01372535, 0.01598378, 0.001151901, -0.012246614, -0.004184148, -0.023605987, 0.008657321, -0.025770318, -0.019048795, -0.023054823, 0.005535174, -0.018161554, -0.019761277, 0.01385978, -0.016655933, 0.01416897, 0.015311629, 0.008919461, 0.0077499156, 0.023888292, 0.015257857, 0.009087498, 0.0017845642, 0.0013762318, -0.023713533, 0.027464142, -0.014021097, -0.024681432, -0.006741687, 0.0016450927, -0.005804035, -0.002821359, 0.0056796866, -0.023189254, 0.00723908, -0.013483374, -0.018390086, -0.018847149, 0.0061905226, 0.033365637, 0.008489283, 0.015257857, 0.019694062, -0.03019308, -0.012253336, 0.0021744126, -0.00754827, 0.01929077, 0.025044393, 0.017677605, 0.02503095, 0.028579915, 0.01774482, 0.0029961187, -0.019895706, 0.001165344, -0.0075281053, 0.02105181, -0.009221929, 0.023404341, -0.0028079161, -0.0037237236, 0.02847237, 0.0009821824, 0.04629785, -0.017771706, -0.038904175, 0.00869765, 0.0016249281, 0.020984594, -0.10867358, -0.008395182, -0.0010830053, 0.008059106, -0.020097353, 0.0020383017, 0.008038941, -0.009047169, -0.007252523, 0.0286068, -0.0037774958, -0.024923407, 0.005279756, -0.009524398, 0.011527412, -0.0020198175, 0.019452088, 0.014384058, -0.025609002, 0.006025845, -0.030542599, 0.016790364, 0.019223554, -0.012434817, 0.003901844, -0.007817131, -0.027612016, 0.008314524, 0.007938119, -0.0004868903, 0.014747021, -0.009457182, 0.014706692, -0.018847149, 0.015311629, 0.015647706, -0.0031288688, -0.0032717013, 0.008879132, -0.034629285, 0.0090337265, 0.004382433, 0.011305601, 
-0.028391711, 0.0053268066, 0.0003566608, -0.019169783, 0.011507247, 0.023592545, -0.006603896, -0.009685714, 0.010714107, -0.027907763, 0.006412333, 0.0045706355, -0.029816674, 0.0047958065, 0.0018500991, -0.011500525, 0.0030179636, 0.015997224, -0.022140697, -0.0001849469, -0.014263071, 0.011540854, -0.006607257, -0.01871272, -0.0038480717, -0.0024903242, -0.031214751, -0.0050478633, 0.021481987, -0.012912045, 0.028122852, -0.018605174, -0.00723908, 0.0023609349, -0.0073331813, 0.014935223, -0.005699851, -0.0068895607, -0.015244413, 0.029789789, -0.02458733, 0.0004453009, 0.0015577129, 0.0048596608, 0.009376524, -0.011984475, -0.014518489, 0.015647706, 0.0068794787, 0.0065534846, 0.003107024, -0.01973439, 0.027383484, -0.015459502, -0.006318231, 0.020863606, -0.0021357639, -0.0076692575, -0.021266898, -0.046862457, 0.025326697, 0.016521502, -0.0036833945, 0.0029860365, -0.016306413, 0.026496243, -0.016803807, 0.008724537, -0.0025407355, -0.027302826, 0.017798591, 0.0060796174, -0.014007653, -0.01650806, -0.0095042335, 0.009242094, -0.009342916, 0.010330981, 0.009544563, 0.018591732, 0.0036867552, 0.0194252, 0.0092488155, -0.007823853, 0.0015501512, -0.012031525, 0.010203271, -0.0074272826, -0.020258669, 0.025662774, -0.03032751, 0.014854565, 0.010835094, 0.0007708746, 0.0009989863, -0.014007653, -0.012871716, 0.023444671, 0.03323121, -0.034575514, -0.024291582, 0.011634956, -0.025958521, -0.01973439, 0.0029742739, 0.0067148013, 0.0022399474, 0.011802994, 0.011151006, -0.0116416775, 0.030166194, 0.013039754, -0.022517102, -0.011466918, -0.0033053088, 0.006156915, 0.004829414, 0.006029206, -0.016534945, 0.015325071, -0.0109359175, 0.032854803, -0.001010749, 0.0021155993, -0.011702171, -0.009766373, 0.00679882, 0.0040900465, -0.019438643, -0.006758491, -0.0040060277, 0.022436442, 0.025850976, 0.006150193, 0.018632062, -0.0077230297, -0.015298186, -0.017381858, 0.01911601, -0.005763706, -0.0022281848, -0.031994447, 0.0015972018, 0.028848775, 0.014572261, -0.0073264595, -0.009551284, -0.0052058194, 0.014518489, -0.0041068504, 0.010754436, 0.0055519775, -0.005804035, -0.0054007433, 0.028579915, -0.01791958, -0.015284742, 0.036807057, 0.015069654, -0.0023810994, -0.0038648755, 0.0015467904, -0.0037136413, 0.0023458113, 0.019008467, -0.011547576, -0.010001626, 0.012347437, 0.0155267175, 0.01907568, -0.003041489, -0.0132414, 0.017449073, 0.00060073606, -0.008536334, 0.008233866, -0.0085430555, -0.02365976, 0.024089938, -0.0034615842, -0.006580371, 0.008327967, -0.01509654, 0.009692436, 0.025635887, 0.0020282194, -0.04022159, -0.0021290423, -0.012407931, -0.0021727323, 0.006506434, -0.005320085, -0.008240587, 0.020984594, -0.014491603, 0.003592654, 0.0072121937, -0.03081146, 0.043770555, 0.009302587, -0.003217929, 0.019008467, -0.011271994, 0.02917141, 0.0019576435, -0.0077431942, -0.0030448497, -0.023726976, 0.023377456, -0.006382086, 0.025716545, -0.017341528, 0.0035556855, -0.019129453, -0.004311857, -0.003253217, -0.014935223, 0.0036363439, 0.018121226, -0.0066543072, 0.02458733, 0.0035691285, 0.0039085653, -0.014209299, 0.020191453, 0.0357585, 0.007830574, -0.024130266, -0.008912739, 0.008314524, -0.0346024, -0.0014005973, -0.006788738, -0.021777734, 0.010465411, -0.004012749, -0.00679882, 0.009981461, -0.026227381, 0.027033964, -0.015567047, -0.0063115098, 0.0023071626, 0.01037131, 0.015741806, -0.020635074, -0.012945653)); + private static final int LIMIT = 2; private static Map> collectionHelpers; @BeforeAll @@ -242,27 +243,34 @@ void beforeEach() { assumeTrue(isAtlasSearchTest()); } - 
@Test - void vectorSearch() { + private static Stream vectorSearchArgs(){ + return Stream.of( + arguments(approximateVectorSearchOptions(LIMIT + 1)), + arguments(exactVectorSearchOptions()) + ); + } + + @ParameterizedTest + @MethodSource("vectorSearchArgs") + void vectorSearch(final VectorSearchOptions vectorSearchOptions) { assumeTrue(serverVersionAtLeast(7, 1)); CollectionHelper collectionHelper = collectionHelpers.get(MFLIX_EMBEDDED_MOVIES_NS); - int limit = 2; assertAll( () -> { List pipeline = singletonList( Aggregates.vectorSearch( // `multi` is used here only to verify that it is tolerated fieldPath("plot_embedding").multi("ignored"), - QUERY_VECTOR, "sample_mflix__embedded_movies", limit + 1, limit) + QUERY_VECTOR, "sample_mflix__embedded_movies", LIMIT, vectorSearchOptions) ); - Asserters.size(limit) + Asserters.size(LIMIT) .accept(collectionHelper.aggregate(pipeline), msgSupplier(pipeline)); }, () -> { List pipeline = asList( Aggregates.vectorSearch( - fieldPath("plot_embedding"), QUERY_VECTOR, "sample_mflix__embedded_movies", limit + 1, limit, - vectorSearchOptions().filter(gte("year", 2016))), + fieldPath("plot_embedding"), QUERY_VECTOR, "sample_mflix__embedded_movies", LIMIT, + vectorSearchOptions.filter(gte("year", 2016))), Aggregates.project( metaVectorSearchScore("vectorSearchScore")) ); @@ -276,15 +284,23 @@ void vectorSearch() { ); } - @Test - void vectorSearchSupportedFilters() { + private static Stream vectorSearchSupportedFiltersArgs(){ + return Stream.of( + arguments(approximateVectorSearchOptions(1)), + arguments(exactVectorSearchOptions()) + ); + } + + @ParameterizedTest + @MethodSource("vectorSearchSupportedFiltersArgs") + void vectorSearchSupportedFilters(final VectorSearchOptions vectorSearchOptions) { assumeTrue(serverVersionAtLeast(7, 1)); CollectionHelper collectionHelper = collectionHelpers.get(MFLIX_EMBEDDED_MOVIES_NS); Consumer asserter = filter -> { List pipeline = singletonList( Aggregates.vectorSearch( - fieldPath("plot_embedding"), QUERY_VECTOR, "sample_mflix__embedded_movies", 1, 1, - vectorSearchOptions().filter(filter)) + fieldPath("plot_embedding"), QUERY_VECTOR, "sample_mflix__embedded_movies", 1, + vectorSearchOptions.filter(filter)) ); Asserters.nonEmpty() .accept(collectionHelper.aggregate(pipeline), msgSupplier(pipeline)); diff --git a/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy index aee7fea8fa1..21df76e401e 100644 --- a/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy @@ -98,7 +98,8 @@ import static com.mongodb.client.model.search.SearchOperator.exists import static com.mongodb.client.model.search.SearchOptions.searchOptions import static com.mongodb.client.model.search.SearchPath.fieldPath import static com.mongodb.client.model.search.SearchPath.wildcardPath -import static com.mongodb.client.model.search.VectorSearchOptions.vectorSearchOptions +import static com.mongodb.client.model.search.VectorSearchOptions.approximateVectorSearchOptions +import static com.mongodb.client.model.search.VectorSearchOptions.exactVectorSearchOptions import static java.util.Arrays.asList import static org.bson.BsonDocument.parse @@ -849,16 +850,15 @@ class AggregatesSpecification extends Specification { }''') } - def 'should render $vectorSearch'() { + def 'should render approximate $vectorSearch'() { when: BsonDocument 
vectorSearchDoc = toBson( vectorSearch( fieldPath('fieldName').multi('ignored'), [1.0d, 2.0d], 'indexName', - 2, 1, - vectorSearchOptions() + approximateVectorSearchOptions(2) .filter(Filters.ne("fieldName", "fieldValue")) ) @@ -877,15 +877,17 @@ class AggregatesSpecification extends Specification { }''') } - def 'should render $vectorSearch with no options'() { + def 'should render exact $vectorSearch'() { when: BsonDocument vectorSearchDoc = toBson( vectorSearch( - fieldPath('fieldName'), + fieldPath('fieldName').multi('ignored'), [1.0d, 2.0d], 'indexName', - 2, - 1 + 1, + exactVectorSearchOptions() + .filter(Filters.ne("fieldName", "fieldValue")) + ) ) @@ -895,8 +897,9 @@ class AggregatesSpecification extends Specification { "path": "fieldName", "queryVector": [1.0, 2.0], "index": "indexName", - "numCandidates": {"$numberLong": "2"}, - "limit": {"$numberLong": "1"} + "exact": true, + "limit": {"$numberLong": "1"}, + "filter": {"fieldName": {"$ne": "fieldValue"}} } }''') } diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/VectorSearchOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/VectorSearchOptionsTest.java index 4d3ad463b13..190347dc1fe 100644 --- a/driver-core/src/test/unit/com/mongodb/client/model/search/VectorSearchOptionsTest.java +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/VectorSearchOptionsTest.java @@ -16,7 +16,9 @@ package com.mongodb.client.model.search; import com.mongodb.client.model.Filters; +import org.bson.BsonBoolean; import org.bson.BsonDocument; +import org.bson.BsonInt64; import org.bson.BsonString; import org.junit.jupiter.api.Test; @@ -24,10 +26,19 @@ final class VectorSearchOptionsTest { @Test - void vectorSearchOptions() { + void approximateVectorSearchOptions() { assertEquals( - new BsonDocument(), - VectorSearchOptions.vectorSearchOptions() + new BsonDocument().append("numCandidates", new BsonInt64(1)), + VectorSearchOptions.approximateVectorSearchOptions(1) + .toBsonDocument() + ); + } + + @Test + void exactVectorSearchOptions() { + assertEquals( + new BsonDocument().append("exact", new BsonBoolean(true)), + VectorSearchOptions.exactVectorSearchOptions() .toBsonDocument() ); } @@ -35,50 +46,81 @@ void vectorSearchOptions() { @Test void option() { assertEquals( - VectorSearchOptions.vectorSearchOptions() + VectorSearchOptions.approximateVectorSearchOptions(1) .filter(Filters.lt("fieldName", 1)) .toBsonDocument(), - VectorSearchOptions.vectorSearchOptions() + VectorSearchOptions.approximateVectorSearchOptions(1) .option("filter", Filters.lt("fieldName", 1)) .toBsonDocument()); } @Test - void filter() { + void filterApproximate() { assertEquals( new BsonDocument() - .append("filter", Filters.lt("fieldName", 1).toBsonDocument()), - VectorSearchOptions.vectorSearchOptions() + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("numCandidates", new BsonInt64(1)), + VectorSearchOptions.approximateVectorSearchOptions(1) .filter(Filters.lt("fieldName", 1)) .toBsonDocument() ); } @Test - void options() { + void filterExact() { + assertEquals( + new BsonDocument() + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("exact", new BsonBoolean(true)), + VectorSearchOptions.exactVectorSearchOptions() + .filter(Filters.lt("fieldName", 1)) + .toBsonDocument() + ); + } + + @Test + void optionsApproximate() { + assertEquals( + new BsonDocument() + .append("name", new BsonString("value")) + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + 
.append("numCandidates", new BsonInt64(1)), + VectorSearchOptions.approximateVectorSearchOptions(1) + .option("name", "value") + .filter(Filters.lt("fieldName", 0)) + .option("filter", Filters.lt("fieldName", 1)) + .option("numCandidates", new BsonInt64(1)) + .toBsonDocument() + ); + } + + @Test + void optionsExact() { assertEquals( new BsonDocument() .append("name", new BsonString("value")) - .append("filter", Filters.lt("fieldName", 1).toBsonDocument()), - VectorSearchOptions.vectorSearchOptions() + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("exact", new BsonBoolean(true)), + VectorSearchOptions.exactVectorSearchOptions() .option("name", "value") .filter(Filters.lt("fieldName", 0)) .option("filter", Filters.lt("fieldName", 1)) + .option("exact", new BsonBoolean(true)) .toBsonDocument() ); } @Test - void vectorSearchOptionsIsUnmodifiable() { - String expected = VectorSearchOptions.vectorSearchOptions().toBsonDocument().toJson(); - VectorSearchOptions.vectorSearchOptions().option("name", "value"); - assertEquals(expected, VectorSearchOptions.vectorSearchOptions().toBsonDocument().toJson()); + void approximateVectorSearchOptionsIsUnmodifiable() { + String expected = VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson(); + VectorSearchOptions.approximateVectorSearchOptions(1).option("name", "value"); + assertEquals(expected, VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson()); } @Test - void vectorSearchOptionsIsImmutable() { - String expected = VectorSearchOptions.vectorSearchOptions().toBsonDocument().toJson(); - VectorSearchOptions.vectorSearchOptions().toBsonDocument().append("name", new BsonString("value")); - assertEquals(expected, VectorSearchOptions.vectorSearchOptions().toBsonDocument().toJson()); + void approximateVectorSearchOptionsIsImmutable() { + String expected = VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson(); + VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().append("name", new BsonString("value")); + assertEquals(expected, VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson()); } } diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala index 0fff8c4c8ba..ed08ad5d551 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala @@ -730,32 +730,6 @@ object Aggregates { * @param queryVector The query vector. The number of dimensions must match that of the `index`. * @param path The field to be searched. * @param index The name of the index to use. - * @param numCandidates The number of candidates. - * @param limit The limit on the number of documents produced by the pipeline stage. - * @return The `\$vectorSearch` pipeline stage. - * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] - * @note Requires MongoDB 6.0.10 or greater - * @since 4.11 - */ - @Beta(Array(Reason.SERVER)) - def vectorSearch( - path: FieldSearchPath, - queryVector: Iterable[java.lang.Double], - index: String, - numCandidates: Long, - limit: Long - ): Bson = - JAggregates.vectorSearch(path, queryVector.asJava, index, numCandidates, limit) - - /** - * Creates a `\$vectorSearch` pipeline stage supported by MongoDB Atlas. 
- * You may use the `\$meta: "vectorSearchScore"` expression, e.g., via [[Projections.metaVectorSearchScore]], - * to extract the relevance score assigned to each found document. - * - * @param queryVector The query vector. The number of dimensions must match that of the `index`. - * @param path The field to be searched. - * @param index The name of the index to use. - * @param numCandidates The number of candidates. * @param limit The limit on the number of documents produced by the pipeline stage. * @param options Optional `\$vectorSearch` pipeline stage fields. * @return The `\$vectorSearch` pipeline stage. @@ -768,11 +742,10 @@ object Aggregates { path: FieldSearchPath, queryVector: Iterable[java.lang.Double], index: String, - numCandidates: Long, limit: Long, options: VectorSearchOptions ): Bson = - JAggregates.vectorSearch(path, queryVector.asJava, index, numCandidates, limit, options) + JAggregates.vectorSearch(path, queryVector.asJava, index, limit, options) /** * Creates an `\$unset` pipeline stage that removes/excludes fields from documents diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala index ab25650ca7a..0778399fc4b 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala @@ -22,16 +22,25 @@ import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOpt * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. * * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] - * @note Requires MongoDB 6.0.10 or greater + * @note Requires MongoDB 6.0.11, or greater * @since 4.11 */ @Beta(Array(Reason.SERVER)) object VectorSearchOptions { /** - * Returns `VectorSearchOptions` that represents server defaults. + * Returns `ApproximateVectorSearchOptions` that represents server defaults. * - * @return `VectorSearchOptions` that represents server defaults. + * @return `ApproximateVectorSearchOptions` that represents server defaults. */ - def vectorSearchOptions(): VectorSearchOptions = JVectorSearchOptions.vectorSearchOptions() + def approximateVectorSearchOptions(numCandidates: Long): ApproximateVectorSearchOptions = + JVectorSearchOptions.approximateVectorSearchOptions(numCandidates) + + /** + * Returns `ExactVectorSearchOptions` that represents server defaults with the `exact` option set to true. + * + * @return `ExactVectorSearchOptions` that represents server defaults. + * @since 5.2 + */ + def exactVectorSearchOptions(): ExactVectorSearchOptions = JVectorSearchOptions.exactVectorSearchOptions() } diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala index fb9e393dd1b..557060324cd 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala @@ -223,13 +223,40 @@ package object search { * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. 
* * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] - * @note Requires MongoDB 6.0.10 or greater + * @note Requires MongoDB 6.0.11 or greater * @since 4.11 */ @Sealed @Beta(Array(Reason.SERVER)) type VectorSearchOptions = com.mongodb.client.model.search.VectorSearchOptions + /** + * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. + *

      + * Configures approximate vector search for Atlas Vector Search to enable searches that may not return the exact closest vectors. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] + * @note Requires MongoDB 6.0.11, 7.0.2 or greater + * @since 5.2 + */ + @Sealed + @Beta(Array(Reason.SERVER)) + type ApproximateVectorSearchOptions = com.mongodb.client.model.search.ApproximateVectorSearchOptions + + /** + * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. + *

      + * Configures exact vector search for Atlas Vector Search to enable precise matching, ensuring that + * results are the closest vectors to a given query vector. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] + * @note Requires MongoDB 6.0.16, 7.0.10, 7.3.2 or greater + * @since 5.2 + */ + @Sealed + @Beta(Array(Reason.SERVER)) + type ExactVectorSearchOptions = com.mongodb.client.model.search.ExactVectorSearchOptions + /** * Highlighting options. * You may use the `\$meta: "searchHighlights"` expression, e.g., via [[Projections.metaSearchHighlights]], diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala index 18a24b085bf..25152a22d97 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala @@ -41,7 +41,7 @@ import org.mongodb.scala.model.search.SearchCollector import org.mongodb.scala.model.search.SearchOperator.exists import org.mongodb.scala.model.search.SearchOptions.searchOptions import org.mongodb.scala.model.search.SearchPath.{ fieldPath, wildcardPath } -import org.mongodb.scala.model.search.VectorSearchOptions.vectorSearchOptions +import org.mongodb.scala.model.search.VectorSearchOptions.{ approximateVectorSearchOptions, exactVectorSearchOptions } import org.mongodb.scala.{ BaseSpec, MongoClient, MongoNamespace } class AggregatesSpec extends BaseSpec { @@ -763,15 +763,14 @@ class AggregatesSpec extends BaseSpec { ) } - it should "render $vectorSearch" in { + it should "render approximate $vectorSearch" in { toBson( Aggregates.vectorSearch( fieldPath("fieldName").multi("ignored"), List(1.0d, 2.0d), "indexName", - 2, 1, - vectorSearchOptions() + approximateVectorSearchOptions(2) .filter(Filters.ne("fieldName", "fieldValue")) ) ) should equal( @@ -790,14 +789,15 @@ class AggregatesSpec extends BaseSpec { ) } - it should "render $vectorSearch with no options" in { + it should "render exact $vectorSearch" in { toBson( Aggregates.vectorSearch( fieldPath("fieldName").multi("ignored"), List(1.0d, 2.0d), "indexName", - 2, - 1 + 1, + exactVectorSearchOptions() + .filter(Filters.ne("fieldName", "fieldValue")) ) ) should equal( Document( @@ -806,8 +806,9 @@ class AggregatesSpec extends BaseSpec { "path": "fieldName", "queryVector": [1.0, 2.0], "index": "indexName", - "numCandidates": {"$numberLong": "2"}, - "limit": {"$numberLong": "1"} + "exact": true, + "limit": {"$numberLong": "1"}, + "filter": {"fieldName": {"$ne": "fieldValue"}} } }""" ) From 86863c9473f9adc74cd1176486276b662d86052e Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Thu, 29 Aug 2024 11:21:04 +0100 Subject: [PATCH 70/90] Add .sdkmanrc to .gitignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index ebebd4073e5..6398e8490e8 100644 --- a/.gitignore +++ b/.gitignore @@ -42,6 +42,9 @@ local.properties # jenv .java-version +#sdkman +.sdkmanrc + # mongocryptd **/mongocryptd*.pid From c4af1c5fe20a88807bf4fa9c0071f6126424ade3 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Thu, 29 Aug 2024 17:33:25 -0700 Subject: [PATCH 71/90] Add support for kotlinx-datetime serializers mapping to BSON (#1462) - Add kotlinx-datetime serializers that map to BSON as the expected types. - Add kotlinx-datetime as optional dependency. - Make it easily configurable via `@Contextual` annotation. 
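As a concrete illustration of the `@Contextual` wiring mentioned in the bullet above (this sketch is not part of the patch; the `Event` class, its fields, and the `eventCodec` variable are purely illustrative), a data class might opt into the new kotlinx-datetime serializers like this:

```kotlin
import kotlinx.datetime.Instant
import kotlinx.datetime.LocalDate
import kotlinx.serialization.Contextual
import kotlinx.serialization.Serializable
import org.bson.codecs.kotlinx.KotlinSerializerCodec

// Illustrative data class: @Contextual defers to the contextual serializers registered in
// the default serializers module, which this change extends with the kotlinx-datetime mappings.
@Serializable
data class Event(
    @Contextual val occurredAt: Instant, // encoded as a BsonDateTime, millisecond precision
    @Contextual val day: LocalDate       // encoded as a BsonDateTime at the start of the day in UTC
)

// Creating the codec as usual should pick up the date/time serializers without extra module wiring,
// because dateTimeSerializersModule is folded into defaultSerializersModule.
val eventCodec = KotlinSerializerCodec.create<Event>()
```

Because the mappings are registered as contextual serializers, a field only uses them when it is annotated with `@Contextual` (or when a contextual serializer is otherwise requested), so models relying on the types' built-in string serializers keep their existing behavior.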
JAVA-5330 --------- Co-authored-by: Ross Lawley --- bson-kotlinx/build.gradle.kts | 8 + .../bson/codecs/kotlinx/BsonSerializers.kt | 2 +- .../codecs/kotlinx/DateTimeSerializers.kt | 216 ++++++++++++++++++ .../kotlinx/utils/SerializationModuleUtils.kt | 28 +++ .../kotlinx/KotlinSerializerCodecTest.kt | 46 ++++ .../codecs/kotlinx/samples/DataClasses.kt | 20 ++ gradle/publish.gradle | 2 + 7 files changed, 321 insertions(+), 1 deletion(-) create mode 100644 bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt create mode 100644 bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt diff --git a/bson-kotlinx/build.gradle.kts b/bson-kotlinx/build.gradle.kts index bb9dd42e10b..5f707239581 100644 --- a/bson-kotlinx/build.gradle.kts +++ b/bson-kotlinx/build.gradle.kts @@ -38,6 +38,12 @@ description = "Bson Kotlinx Codecs" ext.set("pomName", "Bson Kotlinx") +ext.set("kotlinxDatetimeVersion", "0.4.0") + +val kotlinxDatetimeVersion: String by ext + +java { registerFeature("dateTimeSupport") { usingSourceSet(sourceSets["main"]) } } + dependencies { // Align versions of all Kotlin components implementation(platform("org.jetbrains.kotlin:kotlin-bom")) @@ -45,12 +51,14 @@ dependencies { implementation(platform("org.jetbrains.kotlinx:kotlinx-serialization-bom:1.5.0")) implementation("org.jetbrains.kotlinx:kotlinx-serialization-core") + "dateTimeSupportImplementation"("org.jetbrains.kotlinx:kotlinx-datetime:$kotlinxDatetimeVersion") api(project(path = ":bson", configuration = "default")) implementation("org.jetbrains.kotlin:kotlin-reflect") testImplementation("org.jetbrains.kotlin:kotlin-test-junit") testImplementation(project(path = ":driver-core", configuration = "default")) + testImplementation("org.jetbrains.kotlinx:kotlinx-datetime:$kotlinxDatetimeVersion") } kotlin { explicitApi() } diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt index 05d84b65987..26c19c0fe17 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt @@ -61,7 +61,7 @@ import org.bson.types.ObjectId */ @ExperimentalSerializationApi public val defaultSerializersModule: SerializersModule = - ObjectIdSerializer.serializersModule + BsonValueSerializer.serializersModule + ObjectIdSerializer.serializersModule + BsonValueSerializer.serializersModule + dateTimeSerializersModule @ExperimentalSerializationApi @Serializer(forClass = ObjectId::class) diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt new file mode 100644 index 00000000000..e3e228ecbfb --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt @@ -0,0 +1,216 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlinx + +import java.time.ZoneOffset +import kotlinx.datetime.Instant +import kotlinx.datetime.LocalDate +import kotlinx.datetime.LocalDateTime +import kotlinx.datetime.LocalTime +import kotlinx.datetime.TimeZone +import kotlinx.datetime.UtcOffset +import kotlinx.datetime.atDate +import kotlinx.datetime.atStartOfDayIn +import kotlinx.datetime.toInstant +import kotlinx.datetime.toLocalDateTime +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.KSerializer +import kotlinx.serialization.SerializationException +import kotlinx.serialization.descriptors.PrimitiveKind +import kotlinx.serialization.descriptors.PrimitiveSerialDescriptor +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.encoding.Decoder +import kotlinx.serialization.encoding.Encoder +import kotlinx.serialization.modules.SerializersModule +import kotlinx.serialization.modules.plus +import org.bson.BsonDateTime +import org.bson.codecs.kotlinx.utils.SerializationModuleUtils.isClassAvailable + +/** + * The default serializers module + * + * Handles: + * - ObjectId serialization + * - BsonValue serialization + * - Instant serialization + * - LocalDate serialization + * - LocalDateTime serialization + * - LocalTime serialization + */ +@ExperimentalSerializationApi +public val dateTimeSerializersModule: SerializersModule by lazy { + var module = SerializersModule {} + if (isClassAvailable("kotlinx.datetime.Instant")) { + module += + InstantAsBsonDateTime.serializersModule + + LocalDateAsBsonDateTime.serializersModule + + LocalDateTimeAsBsonDateTime.serializersModule + + LocalTimeAsBsonDateTime.serializersModule + } + module +} + +/** + * Instant KSerializer. + * + * Encodes and decodes `Instant` objects to and from `BsonDateTime`. Data is extracted via + * [kotlinx.datetime.Instant.fromEpochMilliseconds] and stored to millisecond accuracy. + * + * @since 5.2 + */ +@ExperimentalSerializationApi +public object InstantAsBsonDateTime : KSerializer { + override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor("InstantAsBsonDateTime", PrimitiveKind.STRING) + + override fun serialize(encoder: Encoder, value: Instant) { + when (encoder) { + is BsonEncoder -> encoder.encodeBsonValue(BsonDateTime(value.toEpochMilliseconds())) + else -> throw SerializationException("Instant is not supported by ${encoder::class}") + } + } + + override fun deserialize(decoder: Decoder): Instant { + return when (decoder) { + is BsonDecoder -> Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value) + else -> throw SerializationException("Instant is not supported by ${decoder::class}") + } + } + + @Suppress("UNCHECKED_CAST") + public val serializersModule: SerializersModule = SerializersModule { + contextual(Instant::class, InstantAsBsonDateTime as KSerializer) + } +} + +/** + * LocalDate KSerializer. + * + * Encodes and decodes `LocalDate` objects to and from `BsonDateTime`. + * + * Converts the `LocalDate` values to and from `UTC`. 
+ * + * @since 5.2 + */ +@ExperimentalSerializationApi +public object LocalDateAsBsonDateTime : KSerializer { + override val descriptor: SerialDescriptor = + PrimitiveSerialDescriptor("LocalDateAsBsonDateTime", PrimitiveKind.STRING) + + override fun serialize(encoder: Encoder, value: LocalDate) { + when (encoder) { + is BsonEncoder -> { + val epochMillis = value.atStartOfDayIn(TimeZone.UTC).toEpochMilliseconds() + encoder.encodeBsonValue(BsonDateTime(epochMillis)) + } + else -> throw SerializationException("LocalDate is not supported by ${encoder::class}") + } + } + + override fun deserialize(decoder: Decoder): LocalDate { + return when (decoder) { + is BsonDecoder -> + Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value) + .toLocalDateTime(TimeZone.UTC) + .date + else -> throw SerializationException("LocalDate is not supported by ${decoder::class}") + } + } + + @Suppress("UNCHECKED_CAST") + public val serializersModule: SerializersModule = SerializersModule { + contextual(LocalDate::class, LocalDateAsBsonDateTime as KSerializer) + } +} + +/** + * LocalDateTime KSerializer. + * + * Encodes and decodes `LocalDateTime` objects to and from `BsonDateTime`. Data is stored to millisecond accuracy. + * + * Converts the `LocalDateTime` values to and from `UTC`. + * + * @since 5.2 + */ +@ExperimentalSerializationApi +public object LocalDateTimeAsBsonDateTime : KSerializer { + override val descriptor: SerialDescriptor = + PrimitiveSerialDescriptor("LocalDateTimeAsBsonDateTime", PrimitiveKind.STRING) + + override fun serialize(encoder: Encoder, value: LocalDateTime) { + when (encoder) { + is BsonEncoder -> { + val epochMillis = value.toInstant(UtcOffset(ZoneOffset.UTC)).toEpochMilliseconds() + encoder.encodeBsonValue(BsonDateTime(epochMillis)) + } + else -> throw SerializationException("LocalDateTime is not supported by ${encoder::class}") + } + } + + override fun deserialize(decoder: Decoder): LocalDateTime { + return when (decoder) { + is BsonDecoder -> + Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value) + .toLocalDateTime(TimeZone.UTC) + else -> throw SerializationException("LocalDateTime is not supported by ${decoder::class}") + } + } + + @Suppress("UNCHECKED_CAST") + public val serializersModule: SerializersModule = SerializersModule { + contextual(LocalDateTime::class, LocalDateTimeAsBsonDateTime as KSerializer) + } +} + +/** + * LocalTime KSerializer. + * + * Encodes and decodes `LocalTime` objects to and from `BsonDateTime`. Data is stored to millisecond accuracy. + * + * Converts the `LocalTime` values to and from EpochDay at `UTC`. 
+ * + * @since 5.2 + */ +@ExperimentalSerializationApi +public object LocalTimeAsBsonDateTime : KSerializer { + override val descriptor: SerialDescriptor = + PrimitiveSerialDescriptor("LocalTimeAsBsonDateTime", PrimitiveKind.STRING) + + override fun serialize(encoder: Encoder, value: LocalTime) { + when (encoder) { + is BsonEncoder -> { + val epochMillis = + value.atDate(LocalDate.fromEpochDays(0)).toInstant(UtcOffset(ZoneOffset.UTC)).toEpochMilliseconds() + encoder.encodeBsonValue(BsonDateTime(epochMillis)) + } + else -> throw SerializationException("LocalTime is not supported by ${encoder::class}") + } + } + + override fun deserialize(decoder: Decoder): LocalTime { + return when (decoder) { + is BsonDecoder -> + Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value) + .toLocalDateTime(TimeZone.UTC) + .time + else -> throw SerializationException("LocalTime is not supported by ${decoder::class}") + } + } + + @Suppress("UNCHECKED_CAST") + public val serializersModule: SerializersModule = SerializersModule { + contextual(LocalTime::class, LocalTimeAsBsonDateTime as KSerializer) + } +} diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt new file mode 100644 index 00000000000..306644c81ad --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlinx.utils + +internal object SerializationModuleUtils { + @Suppress("SwallowedException") + fun isClassAvailable(className: String): Boolean { + return try { + Class.forName(className) + true + } catch (e: ClassNotFoundException) { + false + } + } +} diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt index 05a0d3ffd7d..e9d3742db10 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt @@ -17,6 +17,10 @@ package org.bson.codecs.kotlinx import java.util.stream.Stream import kotlin.test.assertEquals +import kotlinx.datetime.Instant +import kotlinx.datetime.LocalDate +import kotlinx.datetime.LocalDateTime +import kotlinx.datetime.LocalTime import kotlinx.serialization.ExperimentalSerializationApi import kotlinx.serialization.MissingFieldException import kotlinx.serialization.SerializationException @@ -72,7 +76,9 @@ import org.bson.codecs.kotlinx.samples.DataClassWithBsonIgnore import org.bson.codecs.kotlinx.samples.DataClassWithBsonProperty import org.bson.codecs.kotlinx.samples.DataClassWithBsonRepresentation import org.bson.codecs.kotlinx.samples.DataClassWithCollections +import org.bson.codecs.kotlinx.samples.DataClassWithContextualDateValues import org.bson.codecs.kotlinx.samples.DataClassWithDataClassMapKey +import org.bson.codecs.kotlinx.samples.DataClassWithDateValues import org.bson.codecs.kotlinx.samples.DataClassWithDefaults import org.bson.codecs.kotlinx.samples.DataClassWithEmbedded import org.bson.codecs.kotlinx.samples.DataClassWithEncodeDefault @@ -198,6 +204,46 @@ class KotlinSerializerCodecTest { assertDecodesTo(data, expectedDataClass) } + @Test + fun testDataClassWithDateValuesContextualSerialization() { + val expected = + "{\n" + + " \"instant\": {\"\$date\": \"2001-09-09T01:46:40Z\"}, \n" + + " \"localTime\": {\"\$date\": \"1970-01-01T00:00:10Z\"}, \n" + + " \"localDateTime\": {\"\$date\": \"2021-01-01T00:00:04Z\"}, \n" + + " \"localDate\": {\"\$date\": \"1970-10-28T00:00:00Z\"}\n" + + "}".trimMargin() + + val expectedDataClass = + DataClassWithContextualDateValues( + Instant.fromEpochMilliseconds(10_000_000_000_00), + LocalTime.fromMillisecondOfDay(10_000), + LocalDateTime.parse("2021-01-01T00:00:04"), + LocalDate.fromEpochDays(300)) + + assertRoundTrips(expected, expectedDataClass) + } + + @Test + fun testDataClassWithDateValuesStandard() { + val expected = + "{\n" + + " \"instant\": \"1970-01-01T00:00:01Z\", \n" + + " \"localTime\": \"00:00:01\", \n" + + " \"localDateTime\": \"2021-01-01T00:00:04\", \n" + + " \"localDate\": \"1970-01-02\"\n" + + "}".trimMargin() + + val expectedDataClass = + DataClassWithDateValues( + Instant.fromEpochMilliseconds(1000), + LocalTime.fromMillisecondOfDay(1000), + LocalDateTime.parse("2021-01-01T00:00:04"), + LocalDate.fromEpochDays(1)) + + assertRoundTrips(expected, expectedDataClass) + } + @Test fun testDataClassWithComplexTypes() { val expected = diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt index 66907bff103..cbdf41ab2f3 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt @@ -15,6 +15,10 @@ */ package 
org.bson.codecs.kotlinx.samples +import kotlinx.datetime.Instant +import kotlinx.datetime.LocalDate +import kotlinx.datetime.LocalDateTime +import kotlinx.datetime.LocalTime import kotlinx.serialization.Contextual import kotlinx.serialization.EncodeDefault import kotlinx.serialization.ExperimentalSerializationApi @@ -63,6 +67,22 @@ data class DataClassWithSimpleValues( val string: String ) +@Serializable +data class DataClassWithContextualDateValues( + @Contextual val instant: Instant, + @Contextual val localTime: LocalTime, + @Contextual val localDateTime: LocalDateTime, + @Contextual val localDate: LocalDate, +) + +@Serializable +data class DataClassWithDateValues( + val instant: Instant, + val localTime: LocalTime, + val localDateTime: LocalDateTime, + val localDate: LocalDate, +) + @Serializable data class DataClassWithCollections( val listSimple: List, diff --git a/gradle/publish.gradle b/gradle/publish.gradle index 498184db983..25edda53f49 100644 --- a/gradle/publish.gradle +++ b/gradle/publish.gradle @@ -100,6 +100,8 @@ configure(javaProjects) { project -> artifact sourcesJar artifact javadocJar + suppressPomMetadataWarningsFor("dateTimeSupportApiElements") + suppressPomMetadataWarningsFor("dateTimeRuntimeElements") } } From fc7084d89f77472b0dc0cb720f3ac7e3a40df87d Mon Sep 17 00:00:00 2001 From: Maxim Katcharov Date: Fri, 30 Aug 2024 09:29:03 -0600 Subject: [PATCH 72/90] JAVA-5505 minor refactoring (#1488) JAVA-5505 --- .../async/AsynchronousTlsChannel.java | 28 +++++++++---------- .../async/AsynchronousTlsChannelGroup.java | 6 +++- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java index 5419c526ffe..04114318f92 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java @@ -98,8 +98,8 @@ public void read(ByteBuffer dst, A attach, CompletionHandler group.executor.submit(() -> handler.completed((int) c, attach)), - e -> group.executor.submit(() -> handler.failed(e, attach))); + c -> group.submit(() -> handler.completed((int) c, attach)), + e -> group.submit(() -> handler.failed(e, attach))); } @Override @@ -119,8 +119,8 @@ public void read( new ByteBufferSet(dst), timeout, unit, - c -> group.executor.submit(() -> handler.completed((int) c, attach)), - e -> group.executor.submit(() -> handler.failed(e, attach))); + c -> group.submit(() -> handler.completed((int) c, attach)), + e -> group.submit(() -> handler.failed(e, attach))); } @Override @@ -145,8 +145,8 @@ public void read( bufferSet, timeout, unit, - c -> group.executor.submit(() -> handler.completed(c, attach)), - e -> group.executor.submit(() -> handler.failed(e, attach))); + c -> group.submit(() -> handler.completed(c, attach)), + e -> group.submit(() -> handler.failed(e, attach))); } @Override @@ -185,8 +185,8 @@ public void write(ByteBuffer src, A attach, CompletionHandler group.executor.submit(() -> handler.completed((int) c, attach)), - e -> group.executor.submit(() -> handler.failed(e, attach))); + c -> group.submit(() -> handler.completed((int) c, attach)), + e -> group.submit(() -> handler.failed(e, attach))); } @Override @@ -205,8 +205,8 @@ public void write( new ByteBufferSet(src), timeout, unit, - c -> group.executor.submit(() -> handler.completed((int) c, 
attach)), - e -> group.executor.submit(() -> handler.failed(e, attach))); + c -> group.submit(() -> handler.completed((int) c, attach)), + e -> group.submit(() -> handler.failed(e, attach))); } @Override @@ -228,8 +228,8 @@ public void write( bufferSet, timeout, unit, - c -> group.executor.submit(() -> handler.completed(c, attach)), - e -> group.executor.submit(() -> handler.failed(e, attach))); + c -> group.submit(() -> handler.completed(c, attach)), + e -> group.submit(() -> handler.failed(e, attach))); } @Override @@ -251,11 +251,11 @@ public Future write(ByteBuffer src) { } private void completeWithZeroInt(A attach, CompletionHandler handler) { - group.executor.submit(() -> handler.completed(0, attach)); + group.submit(() -> handler.completed(0, attach)); } private void completeWithZeroLong(A attach, CompletionHandler handler) { - group.executor.submit(() -> handler.completed(0L, attach)); + group.submit(() -> handler.completed(0L, attach)); } /** diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java index 6ba89b5157a..2b34226ebac 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java @@ -159,7 +159,7 @@ static final class WriteOperation extends Operation { private final Selector selector; - final ExecutorService executor; + private final ExecutorService executor; private final ScheduledThreadPoolExecutor timeoutExecutor = new ScheduledThreadPoolExecutor( @@ -228,6 +228,10 @@ public AsynchronousTlsChannelGroup() { this(Runtime.getRuntime().availableProcessors()); } + void submit(final Runnable r) { + executor.submit(r); + } + RegisteredSocket registerSocket(TlsChannel reader, SocketChannel socketChannel) { if (shutdown != Shutdown.No) { throw new ShutdownChannelGroupException(); From d86837ef4696cb9d2b1aa2744ee7900bd6a2f519 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Wed, 11 Sep 2024 10:48:05 -0700 Subject: [PATCH 73/90] Update unified CSOT tests. 
(#1494) JAVA-5489 --- .../client-side-operation-timeout/close-cursors.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json index a8b2d724fa9..79b0de7b6aa 100644 --- a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json +++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 200 + "blockTimeMS": 250 } } } @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], From 03f32ea4a36413486e4ec1acee9ca289be9289ed Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Fri, 13 Sep 2024 14:54:51 +0100 Subject: [PATCH 74/90] Added kotlinx.json JsonElement serialization support (#1459) * Added kotlinx.json JsonElement serialization support JAVA-5239 Co-authored-by: Mark --- bson-kotlinx/build.gradle.kts | 7 +- .../org/bson/codecs/kotlinx/BsonDecoder.kt | 169 ++++++++---- .../org/bson/codecs/kotlinx/BsonEncoder.kt | 76 +++-- .../bson/codecs/kotlinx/JsonBsonDecoder.kt | 152 ++++++++++ .../bson/codecs/kotlinx/JsonBsonEncoder.kt | 132 +++++++++ .../codecs/kotlinx/KotlinSerializerCodec.kt | 4 +- .../kotlinx/KotlinSerializerCodecTest.kt | 260 +++++++++++++++++- .../codecs/kotlinx/samples/DataClasses.kt | 20 ++ gradle/publish.gradle | 3 + 9 files changed, 745 insertions(+), 78 deletions(-) create mode 100644 bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt create mode 100644 bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt diff --git a/bson-kotlinx/build.gradle.kts b/bson-kotlinx/build.gradle.kts index 5f707239581..a278b4a3ab2 100644 --- a/bson-kotlinx/build.gradle.kts +++ b/bson-kotlinx/build.gradle.kts @@ -42,7 +42,10 @@ ext.set("kotlinxDatetimeVersion", "0.4.0") val kotlinxDatetimeVersion: String by ext -java { registerFeature("dateTimeSupport") { usingSourceSet(sourceSets["main"]) } } +java { + registerFeature("dateTimeSupport") { usingSourceSet(sourceSets["main"]) } + registerFeature("jsonSupport") { usingSourceSet(sourceSets["main"]) } +} dependencies { // Align versions of all Kotlin components @@ -52,6 +55,7 @@ dependencies { implementation(platform("org.jetbrains.kotlinx:kotlinx-serialization-bom:1.5.0")) implementation("org.jetbrains.kotlinx:kotlinx-serialization-core") "dateTimeSupportImplementation"("org.jetbrains.kotlinx:kotlinx-datetime:$kotlinxDatetimeVersion") + "jsonSupportImplementation"("org.jetbrains.kotlinx:kotlinx-serialization-json") api(project(path = ":bson", configuration = "default")) implementation("org.jetbrains.kotlin:kotlin-reflect") @@ -59,6 +63,7 @@ dependencies { testImplementation("org.jetbrains.kotlin:kotlin-test-junit") testImplementation(project(path = ":driver-core", configuration = "default")) testImplementation("org.jetbrains.kotlinx:kotlinx-datetime:$kotlinxDatetimeVersion") + testImplementation("org.jetbrains.kotlinx:kotlinx-serialization-json") } kotlin { explicitApi() } diff --git 
a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt index 38d9c23309f..68ecbbabc13 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt @@ -27,6 +27,7 @@ import kotlinx.serialization.encoding.AbstractDecoder import kotlinx.serialization.encoding.CompositeDecoder import kotlinx.serialization.encoding.CompositeDecoder.Companion.DECODE_DONE import kotlinx.serialization.encoding.CompositeDecoder.Companion.UNKNOWN_NAME +import kotlinx.serialization.encoding.Decoder import kotlinx.serialization.modules.SerializersModule import org.bson.AbstractBsonReader import org.bson.BsonInvalidOperationException @@ -36,6 +37,10 @@ import org.bson.BsonType import org.bson.BsonValue import org.bson.codecs.BsonValueCodec import org.bson.codecs.DecoderContext +import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonArrayDecoder +import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonDocumentDecoder +import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonMapDecoder +import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonPolymorphicDecoder import org.bson.internal.NumberCodecHelper import org.bson.internal.StringCodecHelper import org.bson.types.ObjectId @@ -45,34 +50,93 @@ import org.bson.types.ObjectId * * For custom serialization handlers */ -public sealed interface BsonDecoder { +@ExperimentalSerializationApi +internal sealed interface BsonDecoder : Decoder, CompositeDecoder { + + /** Factory helper for creating concrete BsonDecoder implementations */ + companion object { + + @Suppress("SwallowedException") + private val hasJsonDecoder: Boolean by lazy { + try { + Class.forName("kotlinx.serialization.json.JsonDecoder") + true + } catch (e: ClassNotFoundException) { + false + } + } + + fun createBsonDecoder( + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonDecoder { + return if (hasJsonDecoder) JsonBsonDecoderImpl(reader, serializersModule, configuration) + else BsonDecoderImpl(reader, serializersModule, configuration) + } + + fun createBsonArrayDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonArrayDecoder { + return if (hasJsonDecoder) JsonBsonArrayDecoder(descriptor, reader, serializersModule, configuration) + else BsonArrayDecoder(descriptor, reader, serializersModule, configuration) + } + + fun createBsonDocumentDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonDocumentDecoder { + return if (hasJsonDecoder) JsonBsonDocumentDecoder(descriptor, reader, serializersModule, configuration) + else BsonDocumentDecoder(descriptor, reader, serializersModule, configuration) + } + + fun createBsonPolymorphicDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonPolymorphicDecoder { + return if (hasJsonDecoder) JsonBsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration) + else BsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration) + } + + fun createBsonMapDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: 
BsonConfiguration + ): BsonMapDecoder { + return if (hasJsonDecoder) JsonBsonMapDecoder(descriptor, reader, serializersModule, configuration) + else BsonMapDecoder(descriptor, reader, serializersModule, configuration) + } + } /** @return the decoded ObjectId */ - public fun decodeObjectId(): ObjectId + fun decodeObjectId(): ObjectId /** @return the decoded BsonValue */ - public fun decodeBsonValue(): BsonValue - - /** @return the BsonReader */ - public fun reader(): BsonReader + fun decodeBsonValue(): BsonValue } -@ExperimentalSerializationApi -internal open class DefaultBsonDecoder( - internal val reader: AbstractBsonReader, +@OptIn(ExperimentalSerializationApi::class) +internal sealed class AbstractBsonDecoder( + val reader: AbstractBsonReader, override val serializersModule: SerializersModule, - internal val configuration: BsonConfiguration + val configuration: BsonConfiguration ) : BsonDecoder, AbstractDecoder() { - private data class ElementMetadata(val name: String, val nullable: Boolean, var processed: Boolean = false) - private var elementsMetadata: Array? = null - private var currentIndex: Int = UNKNOWN_INDEX - companion object { - val validKeyKinds = setOf(PrimitiveKind.STRING, PrimitiveKind.CHAR, SerialKind.ENUM) + val bsonValueCodec = BsonValueCodec() const val UNKNOWN_INDEX = -10 + val validKeyKinds = setOf(PrimitiveKind.STRING, PrimitiveKind.CHAR, SerialKind.ENUM) + fun validateCurrentBsonType( - reader: AbstractBsonReader, + reader: BsonReader, expectedType: BsonType, descriptor: SerialDescriptor, actualType: (descriptor: SerialDescriptor) -> String = { it.kind.toString() } @@ -87,6 +151,10 @@ internal open class DefaultBsonDecoder( } } + private data class ElementMetadata(val name: String, val nullable: Boolean, var processed: Boolean = false) + private var elementsMetadata: Array? 
= null + private var currentIndex: Int = UNKNOWN_INDEX + private fun initElementMetadata(descriptor: SerialDescriptor) { if (this.elementsMetadata != null) return val elementsMetadata = @@ -134,14 +202,13 @@ internal open class DefaultBsonDecoder( ?: UNKNOWN_NAME } - @Suppress("ReturnCount") override fun beginStructure(descriptor: SerialDescriptor): CompositeDecoder { return when (descriptor.kind) { - is StructureKind.LIST -> BsonArrayDecoder(descriptor, reader, serializersModule, configuration) - is PolymorphicKind -> PolymorphicDecoder(descriptor, reader, serializersModule, configuration) + is PolymorphicKind -> createBsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration) + is StructureKind.LIST -> createBsonArrayDecoder(descriptor, reader, serializersModule, configuration) is StructureKind.CLASS, - StructureKind.OBJECT -> BsonDocumentDecoder(descriptor, reader, serializersModule, configuration) - is StructureKind.MAP -> MapDecoder(descriptor, reader, serializersModule, configuration) + StructureKind.OBJECT -> createBsonDocumentDecoder(descriptor, reader, serializersModule, configuration) + is StructureKind.MAP -> createBsonMapDecoder(descriptor, reader, serializersModule, configuration) else -> throw SerializationException("Primitives are not supported at top-level") } } @@ -152,18 +219,15 @@ internal open class DefaultBsonDecoder( is StructureKind.MAP, StructureKind.CLASS, StructureKind.OBJECT -> reader.readEndDocument() - else -> super.endStructure(descriptor) + else -> {} } } override fun decodeByte(): Byte = NumberCodecHelper.decodeByte(reader) - override fun decodeChar(): Char = StringCodecHelper.decodeChar(reader) override fun decodeFloat(): Float = NumberCodecHelper.decodeFloat(reader) - override fun decodeShort(): Short = NumberCodecHelper.decodeShort(reader) override fun decodeBoolean(): Boolean = reader.readBoolean() - override fun decodeDouble(): Double = NumberCodecHelper.decodeDouble(reader) override fun decodeInt(): Int = NumberCodecHelper.decodeInt(reader) override fun decodeLong(): Long = NumberCodecHelper.decodeLong(reader) @@ -183,7 +247,6 @@ internal open class DefaultBsonDecoder( override fun decodeObjectId(): ObjectId = readOrThrow({ reader.readObjectId() }, BsonType.OBJECT_ID) override fun decodeBsonValue(): BsonValue = bsonValueCodec.decode(reader, DecoderContext.builder().build()) - override fun reader(): BsonReader = reader private inline fun readOrThrow(action: () -> T, bsonType: BsonType): T { return try { @@ -197,13 +260,20 @@ internal open class DefaultBsonDecoder( } } -@OptIn(ExperimentalSerializationApi::class) -private class BsonArrayDecoder( +/** The default Bson Decoder implementation */ +internal open class BsonDecoderImpl( + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : AbstractBsonDecoder(reader, serializersModule, configuration) + +/** The Bson array decoder */ +internal open class BsonArrayDecoder( descriptor: SerialDescriptor, reader: AbstractBsonReader, serializersModule: SerializersModule, configuration: BsonConfiguration -) : DefaultBsonDecoder(reader, serializersModule, configuration) { +) : AbstractBsonDecoder(reader, serializersModule, configuration) { init { validateCurrentBsonType(reader, BsonType.ARRAY, descriptor) @@ -218,13 +288,29 @@ private class BsonArrayDecoder( } } +/** The Bson document decoder */ @OptIn(ExperimentalSerializationApi::class) -private class PolymorphicDecoder( +internal open class BsonDocumentDecoder( descriptor: SerialDescriptor, 
reader: AbstractBsonReader, serializersModule: SerializersModule, configuration: BsonConfiguration -) : DefaultBsonDecoder(reader, serializersModule, configuration) { +) : AbstractBsonDecoder(reader, serializersModule, configuration) { + + init { + validateCurrentBsonType(reader, BsonType.DOCUMENT, descriptor) { it.serialName } + reader.readStartDocument() + } +} + +/** The Bson polymorphic class decoder */ +@OptIn(ExperimentalSerializationApi::class) +internal open class BsonPolymorphicDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : AbstractBsonDecoder(reader, serializersModule, configuration) { private var index = 0 private var mark: BsonReaderMark? @@ -239,7 +325,7 @@ private class PolymorphicDecoder( it.reset() mark = null } - return deserializer.deserialize(DefaultBsonDecoder(reader, serializersModule, configuration)) + return deserializer.deserialize(BsonDecoder.createBsonDecoder(reader, serializersModule, configuration)) } override fun decodeElementIndex(descriptor: SerialDescriptor): Int { @@ -266,27 +352,14 @@ private class PolymorphicDecoder( } } +/** The Bson map decoder */ @OptIn(ExperimentalSerializationApi::class) -private class BsonDocumentDecoder( - descriptor: SerialDescriptor, - reader: AbstractBsonReader, - serializersModule: SerializersModule, - configuration: BsonConfiguration -) : DefaultBsonDecoder(reader, serializersModule, configuration) { - init { - validateCurrentBsonType(reader, BsonType.DOCUMENT, descriptor) { it.serialName } - reader.readStartDocument() - } -} - -@OptIn(ExperimentalSerializationApi::class) -private class MapDecoder( +internal open class BsonMapDecoder( descriptor: SerialDescriptor, reader: AbstractBsonReader, serializersModule: SerializersModule, configuration: BsonConfiguration -) : DefaultBsonDecoder(reader, serializersModule, configuration) { - +) : AbstractBsonDecoder(reader, serializersModule, configuration) { private var index = 0 private var isKey = false diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt index 75080254cdb..899b1b7a981 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt @@ -25,6 +25,7 @@ import kotlinx.serialization.descriptors.SerialKind import kotlinx.serialization.descriptors.StructureKind import kotlinx.serialization.encoding.AbstractEncoder import kotlinx.serialization.encoding.CompositeEncoder +import kotlinx.serialization.encoding.Encoder import kotlinx.serialization.modules.SerializersModule import org.bson.BsonValue import org.bson.BsonWriter @@ -37,31 +38,57 @@ import org.bson.types.ObjectId * * For custom serialization handlers */ -public sealed interface BsonEncoder { +@ExperimentalSerializationApi +internal sealed interface BsonEncoder : Encoder, CompositeEncoder { + + /** Factory helper for creating concrete BsonEncoder implementations */ + companion object { + @Suppress("SwallowedException") + private val hasJsonEncoder: Boolean by lazy { + try { + Class.forName("kotlinx.serialization.json.JsonEncoder") + true + } catch (e: ClassNotFoundException) { + false + } + } + + fun createBsonEncoder( + writer: BsonWriter, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonEncoder { + return if (hasJsonEncoder) JsonBsonEncoder(writer, serializersModule, configuration) + else 
BsonEncoderImpl(writer, serializersModule, configuration) + } + } /** * Encodes an ObjectId * * @param value the ObjectId */ - public fun encodeObjectId(value: ObjectId) + fun encodeObjectId(value: ObjectId) /** * Encodes a BsonValue * * @param value the BsonValue */ - public fun encodeBsonValue(value: BsonValue) - - /** @return the BsonWriter */ - public fun writer(): BsonWriter + fun encodeBsonValue(value: BsonValue) } -@ExperimentalSerializationApi -internal class DefaultBsonEncoder( - private val writer: BsonWriter, +/** + * The default BsonEncoder implementation + * + * Unlike BsonDecoder implementations, state is shared when encoding, so a single class is used to encode Bson Arrays, + * Documents, Polymorphic types and Maps. + */ +@OptIn(ExperimentalSerializationApi::class) +internal open class BsonEncoderImpl( + val writer: BsonWriter, override val serializersModule: SerializersModule, - private val configuration: BsonConfiguration + val configuration: BsonConfiguration ) : BsonEncoder, AbstractEncoder() { companion object { @@ -72,19 +99,19 @@ internal class DefaultBsonEncoder( private var isPolymorphic = false private var state = STATE.VALUE private var mapState = MapState() - private val deferredElementHandler: DeferredElementHandler = DeferredElementHandler() + internal val deferredElementHandler: DeferredElementHandler = DeferredElementHandler() override fun shouldEncodeElementDefault(descriptor: SerialDescriptor, index: Int): Boolean = configuration.encodeDefaults override fun beginStructure(descriptor: SerialDescriptor): CompositeEncoder { when (descriptor.kind) { - is StructureKind.LIST -> writer.writeStartArray() is PolymorphicKind -> { writer.writeStartDocument() writer.writeName(configuration.classDiscriminator) isPolymorphic = true } + is StructureKind.LIST -> writer.writeStartArray() is StructureKind.CLASS, StructureKind.OBJECT -> { if (isPolymorphic) { @@ -99,7 +126,7 @@ internal class DefaultBsonEncoder( } else -> throw SerializationException("Primitives are not supported at top-level") } - return super.beginStructure(descriptor) + return this } override fun endStructure(descriptor: SerialDescriptor) { @@ -108,7 +135,7 @@ internal class DefaultBsonEncoder( StructureKind.MAP, StructureKind.CLASS, StructureKind.OBJECT -> writer.writeEndDocument() - else -> super.endStructure(descriptor) + else -> {} } } @@ -146,10 +173,10 @@ internal class DefaultBsonEncoder( // See: https://youtrack.jetbrains.com/issue/KT-66206 if (value != null || configuration.explicitNulls) { encodeName(it) - super.encodeSerializableValue(serializer, value) + super.encodeSerializableValue(serializer, value) } }, - { super.encodeSerializableValue(serializer, value) }) + { super.encodeSerializableValue(serializer, value) }) } override fun encodeNullableSerializableValue(serializer: SerializationStrategy, value: T?) 
{ @@ -157,10 +184,10 @@ internal class DefaultBsonEncoder( { if (value != null || configuration.explicitNulls) { encodeName(it) - super.encodeNullableSerializableValue(serializer, value) + super.encodeNullableSerializableValue(serializer, value) } }, - { super.encodeNullableSerializableValue(serializer, value) }) + { super.encodeNullableSerializableValue(serializer, value) }) } override fun encodeByte(value: Byte) = encodeInt(value.toInt()) @@ -176,7 +203,7 @@ internal class DefaultBsonEncoder( override fun encodeString(value: String) { when (state) { - STATE.NAME -> encodeName(value) + STATE.NAME -> deferredElementHandler.set(value) STATE.VALUE -> writer.writeString(value) } } @@ -197,9 +224,7 @@ internal class DefaultBsonEncoder( bsonValueCodec.encode(writer, value, EncoderContext.builder().build()) } - override fun writer(): BsonWriter = writer - - private fun encodeName(value: Any) { + internal fun encodeName(value: Any) { writer.writeName(value.toString()) state = STATE.VALUE } @@ -211,7 +236,6 @@ internal class DefaultBsonEncoder( private class MapState { var currentState: STATE = STATE.VALUE - fun getState(): STATE = currentState fun nextState(): STATE { @@ -224,15 +248,15 @@ internal class DefaultBsonEncoder( } } - private class DeferredElementHandler { + internal class DeferredElementHandler { private var deferredElementName: String? = null fun set(name: String) { - assert(deferredElementName == null) { -> "Overwriting an existing deferred name" } + assert(deferredElementName == null) { "Overwriting an existing deferred name" } deferredElementName = name } - fun with(actionWithDeferredElement: (String) -> Unit, actionWithoutDeferredElement: () -> Unit): Unit { + fun with(actionWithDeferredElement: (String) -> Unit, actionWithoutDeferredElement: () -> Unit) { deferredElementName?.let { reset() actionWithDeferredElement(it) diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt new file mode 100644 index 00000000000..4b0eee8213a --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlinx + +import java.util.Base64 +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonDecoder +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonArray +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.modules.SerializersModule +import org.bson.AbstractBsonReader +import org.bson.BsonBinarySubType +import org.bson.BsonType +import org.bson.UuidRepresentation +import org.bson.internal.UuidHelper + +@OptIn(ExperimentalSerializationApi::class) +internal interface JsonBsonDecoder : BsonDecoder, JsonDecoder { + val reader: AbstractBsonReader + val configuration: BsonConfiguration + + fun json(): Json = Json { + explicitNulls = configuration.explicitNulls + encodeDefaults = configuration.encodeDefaults + classDiscriminator = configuration.classDiscriminator + serializersModule = this@JsonBsonDecoder.serializersModule + } + + @Suppress("ComplexMethod") + override fun decodeJsonElement(): JsonElement = + reader.run { + when (currentBsonType) { + BsonType.DOCUMENT -> readJsonObject() + BsonType.ARRAY -> readJsonArray() + BsonType.NULL -> JsonPrimitive(decodeNull()) + BsonType.STRING -> JsonPrimitive(decodeString()) + BsonType.BOOLEAN -> JsonPrimitive(decodeBoolean()) + BsonType.INT32 -> JsonPrimitive(decodeInt()) + BsonType.INT64 -> JsonPrimitive(decodeLong()) + BsonType.DOUBLE -> JsonPrimitive(decodeDouble()) + BsonType.DECIMAL128 -> JsonPrimitive(reader.readDecimal128()) + BsonType.OBJECT_ID -> JsonPrimitive(decodeObjectId().toHexString()) + BsonType.DATE_TIME -> JsonPrimitive(reader.readDateTime()) + BsonType.TIMESTAMP -> JsonPrimitive(reader.readTimestamp().value) + BsonType.BINARY -> { + val subtype = reader.peekBinarySubType() + val data = reader.readBinaryData().data + when (subtype) { + BsonBinarySubType.UUID_LEGACY.value -> + JsonPrimitive( + UuidHelper.decodeBinaryToUuid(data, subtype, UuidRepresentation.JAVA_LEGACY).toString()) + BsonBinarySubType.UUID_STANDARD.value -> + JsonPrimitive( + UuidHelper.decodeBinaryToUuid(data, subtype, UuidRepresentation.STANDARD).toString()) + else -> JsonPrimitive(Base64.getEncoder().encodeToString(data)) + } + } + else -> error("Unsupported json type: $currentBsonType") + } + } + + private fun readJsonObject(): JsonObject { + reader.readStartDocument() + val obj = buildJsonObject { + var type = reader.readBsonType() + while (type != BsonType.END_OF_DOCUMENT) { + put(reader.readName(), decodeJsonElement()) + type = reader.readBsonType() + } + } + + reader.readEndDocument() + return obj + } + + private fun readJsonArray(): JsonArray { + reader.readStartArray() + val array = buildJsonArray { + var type = reader.readBsonType() + while (type != BsonType.END_OF_DOCUMENT) { + add(decodeJsonElement()) + type = reader.readBsonType() + } + } + + reader.readEndArray() + return array + } +} + +internal class JsonBsonDecoderImpl( + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonDecoderImpl(reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonArrayDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + 
configuration: BsonConfiguration +) : BsonArrayDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonDocumentDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonDocumentDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonPolymorphicDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonMapDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonMapDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt new file mode 100644 index 00000000000..6cff36a0909 --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlinx + +import java.math.BigDecimal +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.SerializationStrategy +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonEncoder +import kotlinx.serialization.json.JsonNull +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.double +import kotlinx.serialization.json.int +import kotlinx.serialization.json.long +import kotlinx.serialization.modules.SerializersModule +import org.bson.BsonWriter +import org.bson.types.Decimal128 + +@OptIn(ExperimentalSerializationApi::class) +internal class JsonBsonEncoder( + writer: BsonWriter, + override val serializersModule: SerializersModule, + configuration: BsonConfiguration, +) : BsonEncoderImpl(writer, serializersModule, configuration), JsonEncoder { + + companion object { + private val DOUBLE_MIN_VALUE = BigDecimal.valueOf(Double.MIN_VALUE) + private val DOUBLE_MAX_VALUE = BigDecimal.valueOf(Double.MAX_VALUE) + private val INT_MIN_VALUE = BigDecimal.valueOf(Int.MIN_VALUE.toLong()) + private val INT_MAX_VALUE = BigDecimal.valueOf(Int.MAX_VALUE.toLong()) + private val LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE) + private val LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE) + } + + override val json = Json { + explicitNulls = configuration.explicitNulls + encodeDefaults = configuration.encodeDefaults + classDiscriminator = configuration.classDiscriminator + serializersModule = this@JsonBsonEncoder.serializersModule + } + + override fun encodeSerializableValue(serializer: SerializationStrategy, value: T) { + if (value is JsonElement) encodeJsonElement(value) + else super.encodeSerializableValue(serializer, value) + } + + override fun encodeJsonElement(element: JsonElement) { + deferredElementHandler.with( + { + when (element) { + is JsonNull -> + if (configuration.explicitNulls) { + encodeName(it) + encodeNull() + } + is JsonPrimitive -> { + encodeName(it) + encodeJsonPrimitive(element) + } + is JsonObject -> { + encodeName(it) + encodeJsonObject(element) + } + is JsonArray -> { + encodeName(it) + encodeJsonArray(element) + } + } + }, + { + when (element) { + is JsonNull -> if (configuration.explicitNulls) encodeNull() + is JsonPrimitive -> encodeJsonPrimitive(element) + is JsonObject -> encodeJsonObject(element) + is JsonArray -> encodeJsonArray(element) + } + }) + } + + private fun encodeJsonPrimitive(primitive: JsonPrimitive) { + val content = primitive.content + when { + primitive.isString -> encodeString(content) + content == "true" || content == "false" -> encodeBoolean(content.toBooleanStrict()) + else -> { + val decimal = BigDecimal(content) + when { + decimal.scale() != 0 -> + if (DOUBLE_MIN_VALUE <= decimal && decimal <= DOUBLE_MAX_VALUE) { + encodeDouble(primitive.double) + } else { + writer.writeDecimal128(Decimal128(decimal)) + } + INT_MIN_VALUE <= decimal && decimal <= INT_MAX_VALUE -> encodeInt(primitive.int) + LONG_MIN_VALUE <= decimal && decimal <= LONG_MAX_VALUE -> encodeLong(primitive.long) + else -> writer.writeDecimal128(Decimal128(decimal)) + } + } + } + } + + private fun encodeJsonObject(obj: JsonObject) { + writer.writeStartDocument() + obj.forEach { k, v -> + deferredElementHandler.set(k) + encodeJsonElement(v) + } + writer.writeEndDocument() + } + + private fun encodeJsonArray(array: JsonArray) { + 
writer.writeStartArray() + array.forEach(::encodeJsonElement) + writer.writeEndArray() + } +} diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt index 30d40fe6f31..41e674568a5 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt @@ -172,13 +172,13 @@ private constructor( } override fun encode(writer: BsonWriter, value: T, encoderContext: EncoderContext) { - serializer.serialize(DefaultBsonEncoder(writer, serializersModule, bsonConfiguration), value) + serializer.serialize(BsonEncoder.createBsonEncoder(writer, serializersModule, bsonConfiguration), value) } override fun getEncoderClass(): Class = kClass.java override fun decode(reader: BsonReader, decoderContext: DecoderContext): T { require(reader is AbstractBsonReader) - return serializer.deserialize(DefaultBsonDecoder(reader, serializersModule, bsonConfiguration)) + return serializer.deserialize(BsonDecoder.createBsonDecoder(reader, serializersModule, bsonConfiguration)) } } diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt index e9d3742db10..aa749368e04 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt @@ -15,6 +15,8 @@ */ package org.bson.codecs.kotlinx +import java.math.BigDecimal +import java.util.Base64 import java.util.stream.Stream import kotlin.test.assertEquals import kotlinx.datetime.Instant @@ -24,6 +26,10 @@ import kotlinx.datetime.LocalTime import kotlinx.serialization.ExperimentalSerializationApi import kotlinx.serialization.MissingFieldException import kotlinx.serialization.SerializationException +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonArray +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.json.put import kotlinx.serialization.modules.SerializersModule import kotlinx.serialization.modules.plus import kotlinx.serialization.modules.polymorphic @@ -85,6 +91,9 @@ import org.bson.codecs.kotlinx.samples.DataClassWithEncodeDefault import org.bson.codecs.kotlinx.samples.DataClassWithEnum import org.bson.codecs.kotlinx.samples.DataClassWithEnumMapKey import org.bson.codecs.kotlinx.samples.DataClassWithFailingInit +import org.bson.codecs.kotlinx.samples.DataClassWithJsonElement +import org.bson.codecs.kotlinx.samples.DataClassWithJsonElements +import org.bson.codecs.kotlinx.samples.DataClassWithJsonElementsNullable import org.bson.codecs.kotlinx.samples.DataClassWithListThatLastItemDefaultsToNull import org.bson.codecs.kotlinx.samples.DataClassWithMutableList import org.bson.codecs.kotlinx.samples.DataClassWithMutableMap @@ -102,6 +111,8 @@ import org.bson.codecs.kotlinx.samples.DataClassWithTriple import org.bson.codecs.kotlinx.samples.Key import org.bson.codecs.kotlinx.samples.SealedInterface import org.bson.codecs.kotlinx.samples.ValueClass +import org.bson.json.JsonMode +import org.bson.json.JsonWriterSettings import org.junit.jupiter.api.Test import org.junit.jupiter.api.assertThrows import org.junit.jupiter.params.ParameterizedTest @@ -111,6 +122,8 @@ import org.junit.jupiter.params.provider.MethodSource @Suppress("LargeClass") class 
KotlinSerializerCodecTest { private val oid = "\$oid" + private val numberLong = "\$numberLong" + private val numberDecimal = "\$numberDecimal" private val emptyDocument = "{}" private val altConfiguration = BsonConfiguration(encodeDefaults = false, classDiscriminator = "_t", explicitNulls = true) @@ -128,7 +141,7 @@ class KotlinSerializerCodecTest { | "binary": {"${'$'}binary": {"base64": "S2Fma2Egcm9ja3Mh", "subType": "00"}}, | "boolean": true, | "code": {"${'$'}code": "int i = 0;"}, - | "codeWithScope": {"${'$'}code": "int x = y", "${'$'}scope": {"y": {"${'$'}numberInt": "1"}}}, + | "codeWithScope": {"${'$'}code": "int x = y", "${'$'}scope": {"y": 1}}, | "dateTime": {"${'$'}date": {"${'$'}numberLong": "1577836801000"}}, | "decimal128": {"${'$'}numberDecimal": "1.0"}, | "documentEmpty": {}, @@ -148,6 +161,14 @@ class KotlinSerializerCodecTest { .trimMargin() private val allBsonTypesDocument = BsonDocument.parse(allBsonTypesJson) + private val jsonAllSupportedTypesDocument: BsonDocument by + lazy { + val doc = BsonDocument.parse(allBsonTypesJson) + listOf("minKey", "maxKey", "code", "codeWithScope", "regex", "symbol", "undefined").forEach { + doc.remove(it) + } + doc + } companion object { @JvmStatic @@ -799,6 +820,233 @@ class KotlinSerializerCodecTest { assertRoundTrips(expected, dataClass) } + @Test + fun testDataClassWithJsonElement() { + val expected = + """{"value": { + |"char": "c", + |"byte": 0, + |"short": 1, + |"int": 22, + |"long": {"$numberLong": "3000000000"}, + |"decimal": {"$numberDecimal": "10000000000000000000"} + |"decimal2": {"$numberDecimal": "3.1230E+700"} + |"float": 4.0, + |"double": 4.2, + |"boolean": true, + |"string": "String" + |}}""" + .trimMargin() + + val dataClass = + DataClassWithJsonElement( + buildJsonObject { + put("char", "c") + put("byte", 0) + put("short", 1) + put("int", 22) + put("long", 3_000_000_000) + put("decimal", BigDecimal("10000000000000000000")) + put("decimal2", BigDecimal("3.1230E+700")) + put("float", 4.0) + put("double", 4.2) + put("boolean", true) + put("string", "String") + }) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithJsonElements() { + val expected = + """{ + | "jsonElement": {"string": "String"}, + | "jsonArray": [1, 2], + | "jsonElements": [{"string": "String"}, {"int": 42}], + | "jsonNestedMap": {"nestedString": {"string": "String"}, + | "nestedLong": {"long": {"$numberLong": "3000000000"}}} + |}""" + .trimMargin() + + val dataClass = + DataClassWithJsonElements( + buildJsonObject { put("string", "String") }, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + }, + listOf(buildJsonObject { put("string", "String") }, buildJsonObject { put("int", 42) }), + mapOf( + Pair("nestedString", buildJsonObject { put("string", "String") }), + Pair("nestedLong", buildJsonObject { put("long", 3000000000L) }))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithJsonElementsNullable() { + val expected = + """{ + | "jsonElement": {"null": null}, + | "jsonArray": [1, 2, null], + | "jsonElements": [{"null": null}], + | "jsonNestedMap": {"nestedNull": null} + |}""" + .trimMargin() + + val dataClass = + DataClassWithJsonElementsNullable( + buildJsonObject { put("null", null) }, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(null)) + }, + listOf(buildJsonObject { put("null", null) }), + mapOf(Pair("nestedNull", null))) + + assertRoundTrips(expected, dataClass, altConfiguration) + + val expectedNoNulls = + """{ + | "jsonElement": {}, 
+ | "jsonArray": [1, 2], + | "jsonElements": [{}], + | "jsonNestedMap": {} + |}""" + .trimMargin() + + val dataClassNoNulls = + DataClassWithJsonElementsNullable( + buildJsonObject {}, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + }, + listOf(buildJsonObject {}), + mapOf()) + assertEncodesTo(expectedNoNulls, dataClass) + assertDecodesTo(expectedNoNulls, dataClassNoNulls) + } + + @Test + fun testDataClassWithJsonElementNullSupport() { + val expected = + """{"jsonElement": {"null": null}, + | "jsonArray": [1, 2, null], + | "jsonElements": [{"null": null}], + | "jsonNestedMap": {"nestedNull": null} + | } + | """ + .trimMargin() + + val dataClass = + DataClassWithJsonElements( + buildJsonObject { put("null", null) }, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(null)) + }, + listOf(buildJsonObject { put("null", null) }), + mapOf(Pair("nestedNull", JsonPrimitive(null)))) + + assertRoundTrips(expected, dataClass, altConfiguration) + + val expectedNoNulls = + """{"jsonElement": {}, + | "jsonArray": [1, 2], + | "jsonElements": [{}], + | "jsonNestedMap": {} + | } + | """ + .trimMargin() + + val dataClassNoNulls = + DataClassWithJsonElements( + buildJsonObject {}, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + }, + listOf(buildJsonObject {}), + mapOf()) + assertEncodesTo(expectedNoNulls, dataClass) + assertDecodesTo(expectedNoNulls, dataClassNoNulls) + } + + @Test + @Suppress("LongMethod") + fun testDataClassWithJsonElementBsonSupport() { + val dataClassWithAllSupportedJsonTypes = + DataClassWithJsonElement( + buildJsonObject { + put("id", "111111111111111111111111") + put("arrayEmpty", buildJsonArray {}) + put( + "arraySimple", + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(3)) + }) + put( + "arrayComplex", + buildJsonArray { + add(buildJsonObject { put("a", JsonPrimitive(1)) }) + add(buildJsonObject { put("a", JsonPrimitive(2)) }) + }) + put( + "arrayMixedTypes", + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(true)) + add( + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(3)) + }) + add(buildJsonObject { put("a", JsonPrimitive(2)) }) + }) + put( + "arrayComplexMixedTypes", + buildJsonArray { + add(buildJsonObject { put("a", JsonPrimitive(1)) }) + add(buildJsonObject { put("a", JsonPrimitive("a")) }) + }) + put("binary", JsonPrimitive("S2Fma2Egcm9ja3Mh")) + put("boolean", JsonPrimitive(true)) + put("dateTime", JsonPrimitive(1577836801000)) + put("decimal128", JsonPrimitive(1.0)) + put("documentEmpty", buildJsonObject {}) + put("document", buildJsonObject { put("a", JsonPrimitive(1)) }) + put("double", JsonPrimitive(62.0)) + put("int32", JsonPrimitive(42)) + put("int64", JsonPrimitive(52)) + put("objectId", JsonPrimitive("211111111111111111111112")) + put("string", JsonPrimitive("the fox ...")) + put("timestamp", JsonPrimitive(1311768464867721221)) + }) + + val jsonWriterSettings = + JsonWriterSettings.builder() + .outputMode(JsonMode.RELAXED) + .objectIdConverter { oid, writer -> writer.writeString(oid.toHexString()) } + .dateTimeConverter { d, writer -> writer.writeNumber(d.toString()) } + .timestampConverter { ts, writer -> writer.writeNumber(ts.value.toString()) } + .binaryConverter { b, writer -> writer.writeString(Base64.getEncoder().encodeToString(b.data)) } + .decimal128Converter { d, writer -> writer.writeNumber(d.toDouble().toString()) } + .build() + val 
dataClassWithAllSupportedJsonTypesSimpleJson = jsonAllSupportedTypesDocument.toJson(jsonWriterSettings) + + assertEncodesTo( + """{"value": $dataClassWithAllSupportedJsonTypesSimpleJson }""", dataClassWithAllSupportedJsonTypes) + assertDecodesTo("""{"value": $jsonAllSupportedTypesDocument}""", dataClassWithAllSupportedJsonTypes) + } + @Test fun testDataFailures() { assertThrows("Missing data") { @@ -896,6 +1144,7 @@ class KotlinSerializerCodecTest { ): BsonDocument { val expected = BsonDocument.parse(json) val actual = serialize(value, serializersModule, configuration) + println(actual.toJson()) assertEquals(expected, actual) return actual } @@ -913,6 +1162,15 @@ class KotlinSerializerCodecTest { return document } + private inline fun assertDecodesTo( + value: String, + expected: T, + serializersModule: SerializersModule = defaultSerializersModule, + configuration: BsonConfiguration = BsonConfiguration() + ) { + assertDecodesTo(BsonDocument.parse(value), expected, serializersModule, configuration) + } + private inline fun assertDecodesTo( value: BsonDocument, expected: T, diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt index cbdf41ab2f3..e7a06600d20 100644 --- a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt @@ -25,6 +25,8 @@ import kotlinx.serialization.ExperimentalSerializationApi import kotlinx.serialization.Required import kotlinx.serialization.SerialName import kotlinx.serialization.Serializable +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonElement import org.bson.BsonArray import org.bson.BsonBinary import org.bson.BsonBoolean @@ -324,3 +326,21 @@ data class DataClassWithFailingInit(val id: String) { @Serializable data class Box(val boxed: T) @Serializable data class DataClassWithNullableGeneric(val box: Box) + +@Serializable data class DataClassWithJsonElement(val value: JsonElement) + +@Serializable +data class DataClassWithJsonElements( + val jsonElement: JsonElement, + val jsonArray: JsonArray, + val jsonElements: List, + val jsonNestedMap: Map +) + +@Serializable +data class DataClassWithJsonElementsNullable( + val jsonElement: JsonElement?, + val jsonArray: JsonArray?, + val jsonElements: List?, + val jsonNestedMap: Map? 
+) diff --git a/gradle/publish.gradle b/gradle/publish.gradle index 25edda53f49..07f43f762bd 100644 --- a/gradle/publish.gradle +++ b/gradle/publish.gradle @@ -102,6 +102,9 @@ configure(javaProjects) { project -> suppressPomMetadataWarningsFor("dateTimeSupportApiElements") suppressPomMetadataWarningsFor("dateTimeRuntimeElements") + + suppressPomMetadataWarningsFor("jsonSupportApiElements") + suppressPomMetadataWarningsFor("jsonSupportRuntimeElements") } } From b31e7c254ae5641ad240a855e603a94939c5fc42 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Thu, 19 Sep 2024 16:39:49 +0100 Subject: [PATCH 75/90] Fix gradle pom metadata warning (#1498) --- gradle/publish.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradle/publish.gradle b/gradle/publish.gradle index 07f43f762bd..a89b20845a3 100644 --- a/gradle/publish.gradle +++ b/gradle/publish.gradle @@ -101,7 +101,7 @@ configure(javaProjects) { project -> artifact javadocJar suppressPomMetadataWarningsFor("dateTimeSupportApiElements") - suppressPomMetadataWarningsFor("dateTimeRuntimeElements") + suppressPomMetadataWarningsFor("dateTimeSupportRuntimeElements") suppressPomMetadataWarningsFor("jsonSupportApiElements") suppressPomMetadataWarningsFor("jsonSupportRuntimeElements") From 7e3108b43fd241b66b596be9642ac23c7c9261d5 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Fri, 20 Sep 2024 06:24:23 -0700 Subject: [PATCH 76/90] Integrate mongodb-crypt module (#1487) * Integrate mongodb-crypt module into mongo-java-driver as a new Gradle project. * Add runtimeElements to GraalVM script, as this configuration is meant to be used by consumers, to retrieve all the elements necessary to run against this library (imitating transitive dependency resolution). JAVA-5582 Co-authored-by: Ross Lawley --- build.gradle | 1 - config/spotbugs/exclude.xml | 6 + driver-benchmarks/build.gradle | 1 + .../benchmark/benchmarks/BenchmarkSuite.java | 14 + .../framework/BenchmarkResultWriter.java | 2 + .../EvergreenBenchmarkResultWriter.java | 26 + ...MinimalTextBasedBenchmarkResultWriter.java | 6 + .../framework/MongoCryptBenchmarkRunner.java | 224 ++++ .../framework/MongocryptBecnhmarkResult.java | 84 ++ .../TextBasedBenchmarkResultWriter.java | 14 + .../src/resources/keyDocument.json | 24 + driver-core/build.gradle | 2 +- graalvm-native-image-app/build.gradle | 3 +- gradle/publish.gradle | 3 + mongodb-crypt/build.gradle.kts | 187 +++ .../com/mongodb/crypt/capi/BinaryHolder.java | 45 + .../src/main/com/mongodb/crypt/capi/CAPI.java | 1165 +++++++++++++++++ .../com/mongodb/crypt/capi/CAPIHelper.java | 94 ++ .../mongodb/crypt/capi/CipherCallback.java | 92 ++ .../mongodb/crypt/capi/DisposableMemory.java | 31 + .../com/mongodb/crypt/capi/JULLogger.java | 130 ++ .../main/com/mongodb/crypt/capi/Logger.java | 144 ++ .../main/com/mongodb/crypt/capi/Loggers.java | 50 + .../com/mongodb/crypt/capi/MacCallback.java | 60 + .../crypt/capi/MessageDigestCallback.java | 55 + .../capi/MongoAwsKmsProviderOptions.java | 104 ++ .../com/mongodb/crypt/capi/MongoCrypt.java | 100 ++ .../mongodb/crypt/capi/MongoCryptContext.java | 137 ++ .../crypt/capi/MongoCryptContextImpl.java | 164 +++ .../crypt/capi/MongoCryptException.java | 67 + .../mongodb/crypt/capi/MongoCryptImpl.java | 423 ++++++ .../mongodb/crypt/capi/MongoCryptOptions.java | 284 ++++ .../com/mongodb/crypt/capi/MongoCrypts.java | 42 + .../crypt/capi/MongoDataKeyOptions.java | 125 ++ .../capi/MongoExplicitEncryptOptions.java | 227 ++++ .../mongodb/crypt/capi/MongoKeyDecryptor.java | 76 ++ 
.../crypt/capi/MongoKeyDecryptorImpl.java | 104 ++ .../capi/MongoLocalKmsProviderOptions.java | 83 ++ .../capi/MongoRewrapManyDataKeyOptions.java | 104 ++ .../com/mongodb/crypt/capi/SLF4JLogger.java | 110 ++ .../crypt/capi/SecureRandomCallback.java | 51 + .../crypt/capi/SigningRSAESPKCSCallback.java | 73 ++ .../com/mongodb/crypt/capi/package-info.java | 21 + .../META-INF/native-image/jni-config.json | 180 +++ .../META-INF/native-image/reflect-config.json | 134 ++ .../mongodb/crypt/capi/MongoCryptTest.java | 388 ++++++ .../src/test/resources/collection-info.json | 37 + .../src/test/resources/command-reply.json | 13 + mongodb-crypt/src/test/resources/command.json | 6 + .../resources/encrypted-command-reply.json | 16 + .../src/test/resources/encrypted-command.json | 11 + .../src/test/resources/encrypted-value.json | 6 + .../int32/encrypted-payload.json | 26 + .../int32/key-filter.json | 19 + .../int32/rangeopts.json | 14 + .../int32/value-to-encrypt.json | 20 + .../src/test/resources/json-schema.json | 15 + .../src/test/resources/key-document.json | 36 + .../test/resources/key-filter-keyAltName.json | 14 + .../src/test/resources/key-filter.json | 19 + ...3498761234123456789012-local-document.json | 30 + .../src/test/resources/kms-reply.txt | 6 + .../resources/list-collections-filter.json | 3 + .../test/resources/mongocryptd-command.json | 22 + .../src/test/resources/mongocryptd-reply.json | 18 + settings.gradle | 1 + 66 files changed, 5789 insertions(+), 3 deletions(-) create mode 100644 driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java create mode 100644 driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java create mode 100644 driver-benchmarks/src/resources/keyDocument.json create mode 100644 mongodb-crypt/build.gradle.kts create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java create mode 
100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java create mode 100644 mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java create mode 100644 mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json create mode 100644 mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json create mode 100644 mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java create mode 100644 mongodb-crypt/src/test/resources/collection-info.json create mode 100644 mongodb-crypt/src/test/resources/command-reply.json create mode 100644 mongodb-crypt/src/test/resources/command.json create mode 100644 mongodb-crypt/src/test/resources/encrypted-command-reply.json create mode 100644 mongodb-crypt/src/test/resources/encrypted-command.json create mode 100644 mongodb-crypt/src/test/resources/encrypted-value.json create mode 100644 mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json create mode 100644 mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json create mode 100644 mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json create mode 100644 mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json create mode 100644 mongodb-crypt/src/test/resources/json-schema.json create mode 100644 mongodb-crypt/src/test/resources/key-document.json create mode 100644 mongodb-crypt/src/test/resources/key-filter-keyAltName.json create mode 100644 mongodb-crypt/src/test/resources/key-filter.json create mode 100644 mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json create mode 100644 mongodb-crypt/src/test/resources/kms-reply.txt create mode 100644 mongodb-crypt/src/test/resources/list-collections-filter.json create mode 100644 mongodb-crypt/src/test/resources/mongocryptd-command.json create mode 100644 mongodb-crypt/src/test/resources/mongocryptd-reply.json diff --git a/build.gradle b/build.gradle index 86fe2ad12d4..543e6de19ce 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,6 @@ ext { zstdVersion = '1.5.5-3' awsSdkV2Version = '2.18.9' awsSdkV1Version = '1.12.337' - mongoCryptVersion = '1.11.0' projectReactorVersion = '2022.0.0' junitBomVersion = '5.10.2' logbackVersion = '1.3.14' diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml index fedf0c72566..9ce5b944cb4 100644 --- a/config/spotbugs/exclude.xml +++ b/config/spotbugs/exclude.xml @@ -260,4 +260,10 @@ + + + + + + diff --git a/driver-benchmarks/build.gradle b/driver-benchmarks/build.gradle index 960674011eb..91d979cff68 100644 --- a/driver-benchmarks/build.gradle +++ b/driver-benchmarks/build.gradle @@ -31,6 +31,7 @@ sourceSets { dependencies { api project(':driver-sync') + api project(':mongodb-crypt') implementation "ch.qos.logback:logback-classic:$logbackVersion" } diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BenchmarkSuite.java 
b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BenchmarkSuite.java index 08dce238b70..2260e0ed80a 100644 --- a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BenchmarkSuite.java +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BenchmarkSuite.java @@ -22,6 +22,8 @@ import com.mongodb.benchmark.framework.BenchmarkResultWriter; import com.mongodb.benchmark.framework.BenchmarkRunner; import com.mongodb.benchmark.framework.EvergreenBenchmarkResultWriter; +import com.mongodb.benchmark.framework.MongoCryptBenchmarkRunner; +import com.mongodb.benchmark.framework.MongocryptBecnhmarkResult; import org.bson.Document; import org.bson.codecs.Codec; @@ -56,6 +58,7 @@ public static void main(String[] args) throws Exception { private static void runBenchmarks() throws Exception { + runMongoCryptBenchMarks(); runBenchmark(new BsonEncodingBenchmark<>("Flat", "extended_bson/flat_bson.json", DOCUMENT_CODEC)); runBenchmark(new BsonEncodingBenchmark<>("Deep", "extended_bson/deep_bson.json", DOCUMENT_CODEC)); runBenchmark(new BsonEncodingBenchmark<>("Full", "extended_bson/full_bson.json", DOCUMENT_CODEC)); @@ -87,6 +90,17 @@ private static void runBenchmarks() runBenchmark(new GridFSMultiFileDownloadBenchmark()); } + private static void runMongoCryptBenchMarks() throws InterruptedException { + // This runner has been migrated from libmongocrypt as it is. + List results = new MongoCryptBenchmarkRunner().run(); + + for (BenchmarkResultWriter writer : WRITERS) { + for (MongocryptBecnhmarkResult result : results) { + writer.write(result); + } + } + } + private static void runBenchmark(final Benchmark benchmark) throws Exception { long startTime = System.currentTimeMillis(); BenchmarkResult benchmarkResult = new BenchmarkRunner(benchmark, NUM_WARMUP_ITERATIONS, NUM_ITERATIONS, MIN_TIME_SECONDS, diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java index d7f4a4701ce..26828a5a75f 100644 --- a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java @@ -21,4 +21,6 @@ public interface BenchmarkResultWriter extends Closeable { void write(BenchmarkResult benchmarkResult); + + void write(MongocryptBecnhmarkResult result); } diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java index 719bf269163..f1e5361ffeb 100644 --- a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java @@ -65,6 +65,32 @@ public void write(final BenchmarkResult benchmarkResult) { jsonWriter.writeEndDocument(); } + @Override + public void write(final MongocryptBecnhmarkResult result) { + jsonWriter.writeStartDocument(); + + jsonWriter.writeStartDocument("info"); + jsonWriter.writeString("test_name", result.getTestName()); + + jsonWriter.writeStartDocument("args"); + jsonWriter.writeInt32("threads", result.getThreadCount()); + jsonWriter.writeEndDocument(); + jsonWriter.writeEndDocument(); + + jsonWriter.writeString("created_at", result.getCreatedAt()); + jsonWriter.writeString("completed_at", result.getCompletedAt()); + jsonWriter.writeStartArray("metrics"); + + 
jsonWriter.writeStartDocument(); + jsonWriter.writeString("name", result.getMetricName()); + jsonWriter.writeString("type", result.getMetricType()); + jsonWriter.writeDouble("value", result.getMedianOpsPerSec()); + jsonWriter.writeEndDocument(); + + jsonWriter.writeEndArray(); + jsonWriter.writeEndDocument(); + } + @Override public void close() throws IOException { jsonWriter.writeEndArray(); diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java index 73f85697f33..b5ed85f1f2e 100644 --- a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java @@ -34,6 +34,12 @@ public void write(final BenchmarkResult benchmarkResult) { benchmarkResult.getElapsedTimeNanosAtPercentile(50) / ONE_BILLION); } + @Override + public void write(final MongocryptBecnhmarkResult result) { + printStream.printf("%s: %d%n", result.getTestName(), + result.getMedianOpsPerSec()); + } + @Override public void close() { } diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java new file mode 100644 index 00000000000..33b6c0ad102 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java @@ -0,0 +1,224 @@ +package com.mongodb.benchmark.framework; + +/* + * Copyright 2023-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import com.mongodb.crypt.capi.CAPI; +import com.mongodb.crypt.capi.MongoCrypt; +import com.mongodb.crypt.capi.MongoCryptContext; +import com.mongodb.crypt.capi.MongoCryptOptions; +import com.mongodb.crypt.capi.MongoCrypts; +import com.mongodb.crypt.capi.MongoExplicitEncryptOptions; +import com.mongodb.crypt.capi.MongoLocalKmsProviderOptions; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; + +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class MongoCryptBenchmarkRunner { + static final int NUM_FIELDS = 1500; + static final int NUM_WARMUP_SECS = 2; + static final int NUM_SECS = 10; + static final byte[] LOCAL_MASTER_KEY = new byte[]{ + -99, -108, 75, 13, -109, -48, -59, 68, -91, 114, -3, 50, 27, -108, 48, -112, 35, 53, + 115, 124, -16, -10, -62, -12, -38, 35, 86, -25, -113, 4, -52, -6, -34, 117, -76, 81, + -121, -13, -117, -105, -41, 75, 68, 59, -84, 57, -94, -58, 77, -111, 0, 62, -47, -6, 74, + 48, -63, -46, -58, 94, -5, -84, 65, -14, 72, 19, 60, -101, 80, -4, -89, 36, 122, 46, 2, + 99, -93, -58, 22, 37, 81, 80, 120, 62, 15, -40, 110, -124, -90, -20, -115, 45, 36, 71, + -27, -81 + }; + + private static String getFileAsString(final String fileName) { + try { + URL resource = BenchmarkRunner.class.getResource("/" + fileName); + if (resource == null) { + throw new RuntimeException("Could not find file " + fileName); + } + return new String(Files.readAllBytes(Paths.get(resource.toURI()))); + } catch (Throwable t) { + throw new RuntimeException("Could not parse file " + fileName, t); + } + } + + private static BsonDocument getResourceAsDocument(final String fileName) { + return BsonDocument.parse(getFileAsString(fileName)); + } + + private static MongoCrypt createMongoCrypt() { + return MongoCrypts.create(MongoCryptOptions + .builder() + .localKmsProviderOptions(MongoLocalKmsProviderOptions.builder() + .localMasterKey(ByteBuffer.wrap(LOCAL_MASTER_KEY)) + .build()) + .build()); + } + + // DecryptTask decrypts a document repeatedly for a specified number of seconds and records ops/sec. + private static class DecryptTask implements Runnable { + public DecryptTask(MongoCrypt mongoCrypt, BsonDocument toDecrypt, int numSecs, CountDownLatch doneSignal) { + this.mongoCrypt = mongoCrypt; + this.toDecrypt = toDecrypt; + this.opsPerSecs = new ArrayList(numSecs); + this.numSecs = numSecs; + this.doneSignal = doneSignal; + } + + public void run() { + for (int i = 0; i < numSecs; i++) { + long opsPerSec = 0; + long start = System.nanoTime(); + // Run for one second. + while (System.nanoTime() - start < 1_000_000_000) { + try (MongoCryptContext ctx = mongoCrypt.createDecryptionContext(toDecrypt)) { + assert ctx.getState() == MongoCryptContext.State.READY; + ctx.finish(); + opsPerSec++; + } + } + opsPerSecs.add(opsPerSec); + } + doneSignal.countDown(); + } + + public long getMedianOpsPerSecs() { + if (opsPerSecs.size() == 0) { + throw new IllegalStateException("opsPerSecs is empty. 
Was `run` called?"); + } + Collections.sort(opsPerSecs); + return opsPerSecs.get(numSecs / 2); + } + + private MongoCrypt mongoCrypt; + private BsonDocument toDecrypt; + private ArrayList opsPerSecs; + private int numSecs; + private CountDownLatch doneSignal; + } + + public List run() throws InterruptedException { + System.out.printf("BenchmarkRunner is using libmongocrypt version=%s, NUM_WARMUP_SECS=%d, NUM_SECS=%d%n", + CAPI.mongocrypt_version(null).toString(), NUM_WARMUP_SECS, NUM_SECS); + // `keyDocument` is a Data Encryption Key (DEK) encrypted with the Key Encryption Key (KEK) `LOCAL_MASTER_KEY`. + BsonDocument keyDocument = getResourceAsDocument("keyDocument.json"); + try (MongoCrypt mongoCrypt = createMongoCrypt()) { + // `encrypted` will contain encrypted fields. + BsonDocument encrypted = new BsonDocument(); + { + for (int i = 0; i < NUM_FIELDS; i++) { + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("YWFhYWFhYWFhYWFhYWFhYQ=="))) + .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .build(); + BsonDocument toEncrypt = new BsonDocument("v", new BsonString(String.format("value %04d", i))); + try (MongoCryptContext ctx = mongoCrypt.createExplicitEncryptionContext(toEncrypt, options)) { + // If mongocrypt_t has not yet cached the DEK, supply it. + if (MongoCryptContext.State.NEED_MONGO_KEYS == ctx.getState()) { + ctx.addMongoOperationResult(keyDocument); + ctx.completeMongoOperation(); + } + assert ctx.getState() == MongoCryptContext.State.READY; + RawBsonDocument result = ctx.finish(); + BsonValue encryptedValue = result.get("v"); + String key = String.format("key%04d", i); + encrypted.append(key, encryptedValue); + } + } + } + + // Warm up benchmark and discard the result. + DecryptTask warmup = new DecryptTask(mongoCrypt, encrypted, NUM_WARMUP_SECS, new CountDownLatch(1)); + warmup.run(); + + // Decrypt `encrypted` and measure ops/sec. + // Check with varying thread counts to measure impact of a shared pool of Cipher instances. + int[] threadCounts = {1, 2, 8, 64}; + ArrayList totalMedianOpsPerSecs = new ArrayList(threadCounts.length); + ArrayList createdAts = new ArrayList(threadCounts.length); + ArrayList completedAts = new ArrayList(threadCounts.length); + + for (int threadCount : threadCounts) { + ExecutorService executorService = Executors.newFixedThreadPool(threadCount); + CountDownLatch doneSignal = new CountDownLatch(threadCount); + ArrayList decryptTasks = new ArrayList(threadCount); + createdAts.add(ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT)); + + for (int i = 0; i < threadCount; i++) { + DecryptTask decryptTask = new DecryptTask(mongoCrypt, encrypted, NUM_SECS, doneSignal); + decryptTasks.add(decryptTask); + executorService.submit(decryptTask); + } + + // Await completion of all tasks. Tasks are expected to complete shortly after NUM_SECS. Time out `await` if time exceeds 2 * NUM_SECS. + boolean ok = doneSignal.await(NUM_SECS * 2, TimeUnit.SECONDS); + assert ok; + completedAts.add(ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT)); + // Sum the median ops/secs of all tasks to get total throughput. + long totalMedianOpsPerSec = 0; + for (DecryptTask decryptTask : decryptTasks) { + totalMedianOpsPerSec += decryptTask.getMedianOpsPerSecs(); + } + System.out.printf("threadCount=%d. 
Decrypting 1500 fields median ops/sec : %d%n", threadCount, totalMedianOpsPerSec); + totalMedianOpsPerSecs.add(totalMedianOpsPerSec); + executorService.shutdown(); + ok = executorService.awaitTermination(NUM_SECS * 2, TimeUnit.SECONDS); + assert ok; + } + + // Print the results in JSON that can be accepted by the `perf.send` command. + // See https://docs.devprod.prod.corp.mongodb.com/evergreen/Project-Configuration/Project-Commands#perfsend for the expected `perf.send` input. + List results = new ArrayList<>(threadCounts.length); + for (int i = 0; i < threadCounts.length; i++) { + int threadCount = threadCounts[i]; + long totalMedianOpsPerSec = totalMedianOpsPerSecs.get(i); + String createdAt = createdAts.get(i); + String completedAt = completedAts.get(i); + + MongocryptBecnhmarkResult result = new MongocryptBecnhmarkResult( + "java_decrypt_1500", + threadCount, + totalMedianOpsPerSec, + createdAt, + completedAt, + "medianOpsPerSec", + "THROUGHPUT"); + + results.add(result); + } + System.out.println("Results: " + results); + return results; + } + } +} + diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java new file mode 100644 index 00000000000..92ef999bee2 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java @@ -0,0 +1,84 @@ +package com.mongodb.benchmark.framework; +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +public class MongocryptBecnhmarkResult { + private final String testName; + private final int threadCount; + private final long medianOpsPerSec; + private final String createdAt; + private final String completedAt; + private final String metricName; + private final String metricType; + + public MongocryptBecnhmarkResult(final String testName, + final int threadCount, + final long medianOpsPerSec, + final String createdAt, + final String completedAt, + final String metricName, + final String metricType) { + this.testName = testName; + this.threadCount = threadCount; + this.medianOpsPerSec = medianOpsPerSec; + this.createdAt = createdAt; + this.completedAt = completedAt; + this.metricName = metricName; + this.metricType = metricType; + } + + public String getTestName() { + return testName; + } + + public int getThreadCount() { + return threadCount; + } + + public long getMedianOpsPerSec() { + return medianOpsPerSec; + } + + public String getCreatedAt() { + return createdAt; + } + + public String getCompletedAt() { + return completedAt; + } + + public String getMetricName() { + return metricName; + } + + public String getMetricType() { + return metricType; + } + + @Override + public String toString() { + return "MongocryptBecnhmarkResult{" + + "testName='" + testName + '\'' + + ", threadCount=" + threadCount + + ", medianOpsPerSec=" + medianOpsPerSec + + ", createdAt=" + createdAt + + ", completedAt=" + completedAt + + ", metricName=" + metricName + + ", metricType=" + metricType + + '}'; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java index 185e518c3a0..9a29c9bd621 100644 --- a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java @@ -82,6 +82,20 @@ public void write(final BenchmarkResult benchmarkResult) { printStream.println(); } + @Override + public void write(final MongocryptBecnhmarkResult result) { + printStream.println(result.getTestName()); + + printStream.println("CreatedAt: " + result.getCreatedAt()); + printStream.println("CompletedAt: " + result.getCompletedAt()); + printStream.println("ThreadCount: " + result.getThreadCount()); + printStream.println("MedianOpsPerSec: " + result.getMedianOpsPerSec()); + printStream.println("MetricType: " + result.getMetricType()); + + printStream.println(); + printStream.println(); + } + @Override public void close() { } diff --git a/driver-benchmarks/src/resources/keyDocument.json b/driver-benchmarks/src/resources/keyDocument.json new file mode 100644 index 00000000000..20d631db86c --- /dev/null +++ b/driver-benchmarks/src/resources/keyDocument.json @@ -0,0 +1,24 @@ +{ + "_id": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ACR7Hm33dDOAAD7l2ubZhSpSUWK8BkALUY+qW3UgBAEcTV8sBwZnaAWnzDsmrX55dgmYHWfynDlJogC/e33u6pbhyXvFTs5ow9OLCuCWBJ39T/Ivm3kMaZJybkejY0V+uc4UEdHvVVz/SbitVnzs2WXdMGmo1/HmDRrxGYZjewFslquv8wtUHF5pyB+QDlQBd/al9M444/8bJZFbMSmtIg==", + "subType": "00" + } + }, + "creationDate": { + "$date": "2023-08-21T14:28:20.875Z" + }, + "updateDate": { + "$date": "2023-08-21T14:28:20.875Z" + }, + "status": 0, + "masterKey": { + "provider": "local" + } +} \ No newline at end of file diff --git a/driver-core/build.gradle b/driver-core/build.gradle index 
1f7d06f93f2..78ab607cc23 100644 --- a/driver-core/build.gradle +++ b/driver-core/build.gradle @@ -39,6 +39,7 @@ dependencies { implementation project(path: ':bson-record-codec', configuration: 'default') implementation project(path: ':bson-kotlin', configuration: 'default'), optional implementation project(path: ':bson-kotlinx', configuration: 'default'), optional + implementation project(path: ':mongodb-crypt', configuration: 'default'), optional implementation "com.github.jnr:jnr-unixsocket:$jnrUnixsocketVersion", optional api platform("io.netty:netty-bom:$nettyVersion") @@ -55,7 +56,6 @@ dependencies { implementation "org.xerial.snappy:snappy-java:$snappyVersion", optional implementation "com.github.luben:zstd-jni:$zstdVersion", optional - implementation "org.mongodb:mongodb-crypt:$mongoCryptVersion", optional testImplementation project(':bson').sourceSets.test.output testImplementation('org.junit.jupiter:junit-jupiter-api') diff --git a/graalvm-native-image-app/build.gradle b/graalvm-native-image-app/build.gradle index c34d8623b15..d6bc5a7b6cb 100644 --- a/graalvm-native-image-app/build.gradle +++ b/graalvm-native-image-app/build.gradle @@ -82,12 +82,13 @@ dependencies { implementation project(path:':driver-sync', configuration:'archives') implementation project(path:':driver-reactive-streams', configuration:'archives') implementation project(path:':driver-legacy', configuration:'archives') + implementation project(path: ':mongodb-crypt', configuration: 'archives') + implementation project(path: ':mongodb-crypt', configuration: 'runtimeElements') // note that as a result of these `sourceSets` dependencies, `driver-sync/src/test/resources/logback-test.xml` is used implementation project(':driver-core').sourceSets.test.output implementation project(':driver-sync').sourceSets.test.output implementation project(':driver-legacy').sourceSets.test.output implementation project(':driver-reactive-streams').sourceSets.test.output - implementation "org.mongodb:mongodb-crypt:$mongoCryptVersion" implementation 'org.slf4j:slf4j-api:2.0.12' implementation "ch.qos.logback:logback-classic:$logbackVersion" implementation platform("io.projectreactor:reactor-bom:$projectReactorVersion") diff --git a/gradle/publish.gradle b/gradle/publish.gradle index a89b20845a3..f72773c5ad7 100644 --- a/gradle/publish.gradle +++ b/gradle/publish.gradle @@ -105,6 +105,9 @@ configure(javaProjects) { project -> suppressPomMetadataWarningsFor("jsonSupportApiElements") suppressPomMetadataWarningsFor("jsonSupportRuntimeElements") + + suppressPomMetadataWarningsFor("mongoCryptSupportApiElements") + suppressPomMetadataWarningsFor("mongoCryptSupportRuntimeElements") } } diff --git a/mongodb-crypt/build.gradle.kts b/mongodb-crypt/build.gradle.kts new file mode 100644 index 00000000000..bf2fef544ff --- /dev/null +++ b/mongodb-crypt/build.gradle.kts @@ -0,0 +1,187 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import de.undercouch.gradle.tasks.download.Download + +buildscript { + repositories { + mavenCentral() + google() + } + dependencies { + "classpath"(group = "net.java.dev.jna", name = "jna", version = "5.11.0") + } +} + +plugins { + // Needed to download libmongocrypt from s3. + id("de.undercouch.download") version "5.6.0" +} + +group = "org.mongodb" +base.archivesName.set("mongodb-crypt") +description = "MongoDB client-side crypto support" +ext.set("pomName", "MongoCrypt") + +java { + sourceCompatibility = JavaVersion.VERSION_1_8 + targetCompatibility = JavaVersion.VERSION_1_8 +} + +dependencies { + api(project(path = ":bson", configuration = "default")) + api("net.java.dev.jna:jna:5.11.0") + + // Tests + testImplementation("org.junit.jupiter:junit-jupiter") +} + +/* + * Jna copy or download resources + */ +val jnaDownloadsDir = "$buildDir/jnaLibs/downloads/" +val jnaResourcesDir = "$buildDir/jnaLibs/resources/" +val jnaLibPlatform: String = if (com.sun.jna.Platform.RESOURCE_PREFIX.startsWith("darwin")) "darwin" else com.sun.jna.Platform.RESOURCE_PREFIX +val jnaLibsPath: String = System.getProperty("jnaLibsPath", "${jnaResourcesDir}${jnaLibPlatform}") +val jnaResources: String = System.getProperty("jna.library.path", jnaLibsPath) + +// Download jnaLibs that match the git tag or revision to jnaResourcesBuildDir +val downloadRevision = "9a88ac5698e8e3ffcd6580b98c247f0126f26c40" // r1.11.0 +val binariesArchiveName = "libmongocrypt-java.tar.gz" + +/** + * The name of the archive includes downloadRevision to ensure that: + * - the archive is downloaded if the revision changes. + * - the archive is not downloaded if the revision is the same and archive had already been saved in build output. + */ +val localBinariesArchiveName = "libmongocrypt-java-$downloadRevision.tar.gz" + +val downloadUrl: String = "https://mciuploads.s3.amazonaws.com/libmongocrypt/java/$downloadRevision/$binariesArchiveName" + +val jnaMapping: Map = mapOf( + "rhel-62-64-bit" to "linux-x86-64", + "rhel72-zseries-test" to "linux-s390x", + "rhel-71-ppc64el" to "linux-ppc64le", + "ubuntu1604-arm64" to "linux-aarch64", + "windows-test" to "win32-x86-64", + "macos" to "darwin" +) + +sourceSets { + main { + java { + resources { + srcDirs(jnaResourcesDir) + } + } + } +} + +tasks.register("downloadJava") { + src(downloadUrl) + dest("${jnaDownloadsDir}/$localBinariesArchiveName") + overwrite(true) + /* To make sure we don't download archive with binaries if it hasn't been changed in S3 bucket since last download.*/ + onlyIfModified(true) +} + +tasks.register("unzipJava") { + /* + Clean up the directory first if the task is not UP-TO-DATE. + This can happen if the download revision has been changed and the archive is downloaded again. + */ + doFirst { + println("Cleaning up $jnaResourcesDir") + delete(jnaResourcesDir) + } + from(tarTree(resources.gzip("${jnaDownloadsDir}/$localBinariesArchiveName"))) + include(jnaMapping.keys.flatMap { + listOf("${it}/nocrypto/**/libmongocrypt.so", "${it}/lib/**/libmongocrypt.dylib", "${it}/bin/**/mongocrypt.dll" ) + }) + eachFile { + path = "${jnaMapping[path.substringBefore("/")]}/${name}" + } + into(jnaResourcesDir) + dependsOn("downloadJava") + + doLast { + println("jna.library.path contents: \n ${fileTree(jnaResourcesDir).files.joinToString(",\n ")}") + } +} + +// The `processResources` task (defined by the `java-library` plug-in) consumes files in the main source set. +// Add a dependency on `unzipJava`. `unzipJava` adds libmongocrypt libraries to the main source set. 
+tasks.processResources { + mustRunAfter(tasks.named("unzipJava")) +} + +tasks.register("downloadJnaLibs") { + dependsOn("downloadJava", "unzipJava") +} + +tasks.test { + systemProperty("jna.debug_load", "true") + systemProperty("jna.library.path", jnaResources) + useJUnitPlatform() + testLogging { + events("passed", "skipped", "failed") + } + + doFirst { + println("jna.library.path contents:") + println(fileTree(jnaResources) { + this.setIncludes(listOf("*.*")) + }.files.joinToString(",\n ", " ")) + } + dependsOn("downloadJnaLibs", "downloadJava", "unzipJava") +} + +tasks.withType { + description = """$description + | System properties: + | ================= + | + | jnaLibsPath : Custom local JNA library path for inclusion into the build (rather than downloading from s3) + | gitRevision : Optional Git Revision to download the built resources for from s3. + """.trimMargin() +} + +tasks.jar { + //NOTE this enables depending on the mongocrypt from driver-core + dependsOn("downloadJnaLibs") +} + +tasks.javadoc { + if (JavaVersion.current().isJava9Compatible) { + (options as StandardJavadocDocletOptions).addBooleanOption("html5", true) + } +} + +afterEvaluate { + tasks.jar { + manifest { + attributes( + "-exportcontents" to "com.mongodb.crypt.capi.*;-noimport:=true", + "Automatic-Module-Name" to "com.mongodb.crypt.capi", + "Import-Package" to "org.slf4j.*;resolution:=optional,org.bson.*", + "Bundle-Name" to "MongoCrypt", + "Bundle-SymbolicName" to "com.mongodb.crypt.capi", + "Private-Package" to "" + ) + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java new file mode 100644 index 00000000000..60570bd1180 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; + +import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_destroy; + +// Wrap JNA memory and a mongocrypt_binary_t that references that memory, in order to ensure that the JNA Memory is not GC'd before the +// mongocrypt_binary_t is destroyed +class BinaryHolder implements AutoCloseable { + + private final DisposableMemory memory; + private final mongocrypt_binary_t binary; + + BinaryHolder(final DisposableMemory memory, final mongocrypt_binary_t binary) { + this.memory = memory; + this.binary = binary; + } + + mongocrypt_binary_t getBinary() { + return binary; + } + + @Override + public void close() { + mongocrypt_binary_destroy(binary); + memory.dispose(); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java new file mode 100644 index 00000000000..d6567bdaf7c --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java @@ -0,0 +1,1165 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import com.sun.jna.Callback; +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.PointerType; +import com.sun.jna.ptr.PointerByReference; + +//CHECKSTYLE:OFF + +/** + * For internal use only. Not part of the public API. + */ +@SuppressWarnings("WeakerAccess") +public class CAPI { + + public static class cstring extends PointerType { + public cstring() { + super(); + } + + public cstring(String string) { + Pointer m = new Memory(string.length() + 1); + m.setString(0, string); + setPointer(m); + } + + public String toString() { + return getPointer().getString(0); + } + } + + + /** + * Indicates success or contains error information. + *

      + * Functions like @ref mongocrypt_ctx_encrypt_init follow a pattern to expose a + * status. A boolean is returned. True indicates success, and false indicates + * failure. On failure a status on the handle is set, and is accessible with a + * corresponding status function. E.g. @ref mongocrypt_ctx_status. + */ + public static class mongocrypt_status_t extends PointerType { + } + + /** + * Contains all options passed on initialization of a @ref mongocrypt_ctx_t. + */ + public static class mongocrypt_opts_t extends PointerType { + } + + /** + * A non-owning view of a byte buffer. + *

      + * Functions returning a mongocrypt_binary_t* expect it to be destroyed with + * mongocrypt_binary_destroy. + */ + public static class mongocrypt_binary_t extends PointerType { + // The `mongocrypt_binary_t` struct layout is part of libmongocrypt's ABI: + // typedef struct _mongocrypt_binary_t { + // void *data; + // uint32_t len; + // } mongocrypt_binary_t; + // To improve performance, fields are read directly using `getPointer` and `getInt`. + // This results in observed performance improvements over using of `mongocrypt_binary_data` and `mongocrypt_binary_len`. Refer: MONGOCRYPT-589. + public mongocrypt_binary_t() { + super(); + } + public Pointer data() { + return this.getPointer().getPointer(0); + } + public int len() { + int len = this.getPointer().getInt(Native.POINTER_SIZE); + // mongocrypt_binary_t represents length as an unsigned `uint32_t`. + // Representing `uint32_t` values greater than INT32_MAX is represented as a negative `int`. + // Throw an exception. mongocrypt_binary_t is not expected to use lengths greater than INT32_MAX. + if (len < 0) { + throw new AssertionError( + String.format("Expected mongocrypt_binary_t length to be non-negative, got: %d", len)); + } + return len; + + } + } + + /** + * The top-level handle to libmongocrypt. + *

      + * Create a mongocrypt_t handle to perform operations within libmongocrypt: + * encryption, decryption, registering log callbacks, etc. + *

      + * Functions on a mongocrypt_t are thread safe, though functions on derived + * handle (e.g. mongocrypt_encryptor_t) are not and must be owned by a single + * thread. See each handle's documentation for thread-safety considerations. + *

      + * Multiple mongocrypt_t handles may be created. + */ + public static class mongocrypt_t extends PointerType { + } + + /** + * Manages the state machine for encryption or decryption. + */ + public static class mongocrypt_ctx_t extends PointerType { + } + + /** + * Manages a single KMS HTTP request/response. + */ + public static class mongocrypt_kms_ctx_t extends PointerType { + } + + /** + * Returns the version string x.y.z for libmongocrypt. + * + * @param len an optional length of the returned string. May be NULL. + * @return the version string x.y.z for libmongocrypt. + */ + public static native cstring + mongocrypt_version(Pointer len); + + + /** + * Create a new non-owning view of a buffer (data + length). + *

      + * Use this to create a mongocrypt_binary_t used for output parameters. + * + * @return A new mongocrypt_binary_t. + */ + public static native mongocrypt_binary_t + mongocrypt_binary_new(); + + + /** + * Create a new non-owning view of a buffer (data + length). + * + * @param data A pointer to an array of bytes. This is not copied. data must outlive the binary object. + * @param len The length of the @p data byte array. + * @return A new mongocrypt_binary_t. + */ + public static native mongocrypt_binary_t + mongocrypt_binary_new_from_data(Pointer data, int len); + + + /** + * Get a pointer to the referenced data. + * + * @param binary The @ref mongocrypt_binary_t. + * @return A pointer to the referenced data. + */ + public static native Pointer + mongocrypt_binary_data(mongocrypt_binary_t binary); + + + /** + * Get the length of the referenced data. + * + * @param binary The @ref mongocrypt_binary_t. + * @return The length of the referenced data. + */ + public static native int + mongocrypt_binary_len(mongocrypt_binary_t binary); + + + /** + * Free the @ref mongocrypt_binary_t. + *

      + * This does not free the referenced data. Refer to individual function + * documentation to determine the lifetime guarantees of the underlying + * data. + * + * @param binary The mongocrypt_binary_t destroy. + */ + public static native void + mongocrypt_binary_destroy(mongocrypt_binary_t binary); + + + public static final int MONGOCRYPT_STATUS_OK = 0; + public static final int MONGOCRYPT_STATUS_ERROR_CLIENT = 1; + public static final int MONGOCRYPT_STATUS_ERROR_KMS = 2; + + /** + * Create a new status object. + *

      + * Use a new status object to retrieve the status from a handle by passing + * this as an out-parameter to functions like @ref mongocrypt_ctx_status. + * When done, destroy it with @ref mongocrypt_status_destroy. + * + * @return A new status object. + */ + public static native mongocrypt_status_t + mongocrypt_status_new(); + + /** + * Set a status object with message, type, and code. + *

      + * Use this to set the mongocrypt_status_t given in the crypto hooks. + * + * @param status The status. + * @param type The status type. + * @param code The status code. + * @param message The message. + * @param message_len The length of @p message. Pass -1 to determine the * string length with strlen (must * be NULL terminated). + */ + public static native void + mongocrypt_status_set(mongocrypt_status_t status, + int type, + int code, + cstring message, + int message_len); + + /** + * Indicates success or the type of error. + * + * @param status The status object. + * @return A @ref mongocrypt_status_type_t. + */ + + public static native int + mongocrypt_status_type(mongocrypt_status_t status); + + + /** + * Get an error code or 0. + * + * @param status The status object. + * @return An error code. + */ + public static native int + mongocrypt_status_code(mongocrypt_status_t status); + + + /** + * Get the error message associated with a status, or an empty string. + * + * @param status The status object. + * @param len an optional length of the returned string. May be NULL. + * @return An error message or an empty string. + */ + public static native cstring + mongocrypt_status_message(mongocrypt_status_t status, Pointer len); + + + /** + * Returns true if the status indicates success. + * + * @param status The status to check. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_status_ok(mongocrypt_status_t status); + + + /** + * Free the memory for a status object. + * + * @param status The status to destroy. + */ + public static native void + mongocrypt_status_destroy(mongocrypt_status_t status); + + + public static final int MONGOCRYPT_LOG_LEVEL_FATAL = 0; + public static final int MONGOCRYPT_LOG_LEVEL_ERROR = 1; + public static final int MONGOCRYPT_LOG_LEVEL_WARNING = 2; + public static final int MONGOCRYPT_LOG_LEVEL_INFO = 3; + public static final int MONGOCRYPT_LOG_LEVEL_TRACE = 4; + + + /** + * A log callback function. Set a custom log callback with mongocrypt_setopt_log_handler. + */ + public interface mongocrypt_log_fn_t extends Callback { + void log(int level, cstring message, int message_len, Pointer ctx); + } + + public interface mongocrypt_crypto_fn extends Callback { + boolean crypt(Pointer ctx, mongocrypt_binary_t key, mongocrypt_binary_t iv, mongocrypt_binary_t in, + mongocrypt_binary_t out, Pointer bytesWritten, mongocrypt_status_t status); + } + + public interface mongocrypt_hmac_fn extends Callback { + boolean hmac(Pointer ctx, mongocrypt_binary_t key, mongocrypt_binary_t in, mongocrypt_binary_t out, + mongocrypt_status_t status); + } + + public interface mongocrypt_hash_fn extends Callback { + boolean hash(Pointer ctx, mongocrypt_binary_t in, mongocrypt_binary_t out, mongocrypt_status_t status); + } + + public interface mongocrypt_random_fn extends Callback { + boolean random(Pointer ctx, mongocrypt_binary_t out, int count, mongocrypt_status_t status); + } + + /** + * Allocate a new @ref mongocrypt_t object. + *

      + * Initialize with @ref mongocrypt_init. When done, free with @ref + * mongocrypt_destroy. + * + * @return A new @ref mongocrypt_t object. + */ + public static native mongocrypt_t + mongocrypt_new(); + + /** + * Set a handler to get called on every log message. + * + * @param crypt The @ref mongocrypt_t object. + * @param log_fn The log callback. + * @param log_ctx A context passed as an argument to the log callback every + * invokation. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_setopt_log_handler(mongocrypt_t crypt, + mongocrypt_log_fn_t log_fn, + Pointer log_ctx); + + + public static native boolean + mongocrypt_setopt_crypto_hooks(mongocrypt_t crypt, + mongocrypt_crypto_fn aes_256_cbc_encrypt, + mongocrypt_crypto_fn aes_256_cbc_decrypt, + mongocrypt_random_fn random, + mongocrypt_hmac_fn hmac_sha_512, + mongocrypt_hmac_fn hmac_sha_256, + mongocrypt_hash_fn sha_256, + Pointer ctx); + + /** + * Set a crypto hook for the AES256-CTR operations. + * + * @param crypt The @ref mongocrypt_t object. + * @param aes_256_ctr_encrypt The crypto callback function for encrypt + * operation. + * @param aes_256_ctr_decrypt The crypto callback function for decrypt + * operation. + * @param ctx A context passed as an argument to the crypto callback + * every invocation. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + * + */ + public static native boolean + mongocrypt_setopt_aes_256_ctr (mongocrypt_t crypt, + mongocrypt_crypto_fn aes_256_ctr_encrypt, + mongocrypt_crypto_fn aes_256_ctr_decrypt, + Pointer ctx); + + /** + * Set a crypto hook for the RSASSA-PKCS1-v1_5 algorithm with a SHA-256 hash. + * + *

      + * See: https://tools.ietf.org/html/rfc3447#section-8.2
      + *
      + * Note: this function has the wrong name. It should be:
      + * mongocrypt_setopt_crypto_hook_sign_rsassa_pkcs1_v1_5

      + * + * @param crypt The @ref mongocrypt_t object. + * @param sign_rsaes_pkcs1_v1_5 The crypto callback function. + * @param sign_ctx A context passed as an argument to the crypto callback + * every invocation. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + */ + public static native boolean + mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5( + mongocrypt_t crypt, + mongocrypt_hmac_fn sign_rsaes_pkcs1_v1_5, + Pointer sign_ctx); + + /** + * Set a handler to get called on every log message. + * + * @param crypt The @ref mongocrypt_t object. + * @param aws_access_key_id The AWS access key ID used to generate KMS + * messages. + * @param aws_access_key_id_len The string length (in bytes) of @p + * * aws_access_key_id. Pass -1 to determine the string length with strlen (must + * * be NULL terminated). + * @param aws_secret_access_key The AWS secret access key used to generate + * KMS messages. + * @param aws_secret_access_key_len The string length (in bytes) of @p + * aws_secret_access_key. Pass -1 to determine the string length with strlen + * (must be NULL terminated). + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_setopt_kms_provider_aws(mongocrypt_t crypt, + cstring aws_access_key_id, + int aws_access_key_id_len, + cstring aws_secret_access_key, + int aws_secret_access_key_len); + + /** + * Configure a local KMS provider on the @ref mongocrypt_t object. + * + * @param crypt The @ref mongocrypt_t object. + * @param key A 64 byte master key used to encrypt and decrypt key vault keys. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_setopt_kms_provider_local(mongocrypt_t crypt, + mongocrypt_binary_t key); + + /** + * Configure KMS providers with a BSON document. + * + * @param crypt The @ref mongocrypt_t object. + * @param kms_providers A BSON document mapping the KMS provider names to credentials. + * @return A boolean indicating success. If false, an error status is set. + * @since 1.1 + */ + public static native boolean + mongocrypt_setopt_kms_providers(mongocrypt_t crypt, + mongocrypt_binary_t kms_providers); + + /** + * Set a local schema map for encryption. + * + * @param crypt The @ref mongocrypt_t object. + * @param schema_map A BSON document representing the schema map supplied by + * the user. The keys are collection namespaces and values are JSON schemas. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + */ + public static native boolean + mongocrypt_setopt_schema_map (mongocrypt_t crypt, mongocrypt_binary_t schema_map); + + /** + * Opt-into setting KMS providers before each KMS request. + * + * If set, before entering the MONGOCRYPT_CTX_NEED_KMS state, + * contexts will enter the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS state + * and then wait for credentials to be supplied through @ref mongocrypt_ctx_provide_kms_providers. + * + * @param crypt The @ref mongocrypt_t object to update + */ + public static native void + mongocrypt_setopt_use_need_kms_credentials_state (mongocrypt_t crypt); + + + /** + * Set a local EncryptedFieldConfigMap for encryption. + * + * @param crypt The @ref mongocrypt_t object. + * @param encryptedFieldConfigMap A BSON document representing the EncryptedFieldConfigMap + * supplied by the user. The keys are collection namespaces and values are + * EncryptedFieldConfigMap documents. The viewed data copied. 
+ * It is valid to
+ * destroy @p efc_map with @ref mongocrypt_binary_destroy immediately after.
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_status
+ */
+ public static native boolean
+ mongocrypt_setopt_encrypted_field_config_map (mongocrypt_t crypt, mongocrypt_binary_t encryptedFieldConfigMap);
+
+ /**
+ * Opt-into skipping query analysis.
+ *

+ * If opted in:
+ * - The crypt_shared shared library will not attempt to be loaded.
+ * - A mongocrypt_ctx_t will never enter the MONGOCRYPT_CTX_NEED_MARKINGS state.
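For orientation, a minimal sketch of the opt-in, using only mongocrypt_setopt_bypass_query_analysis (declared just below); the wrapper class and its placement in the com.mongodb.crypt.capi package are illustrative assumptions, not part of the patch:

package com.mongodb.crypt.capi;

import com.mongodb.crypt.capi.CAPI.mongocrypt_t;

final class BypassQueryAnalysisSketch {
    private BypassQueryAnalysisSketch() {
    }

    // For a handle used only for explicit encryption/decryption and key management:
    // after this call, crypt_shared will not be loaded and contexts created from
    // `crypt` will never enter the MONGOCRYPT_CTX_NEED_MARKINGS state.
    static void optOutOfQueryAnalysis(final mongocrypt_t crypt) {
        CAPI.mongocrypt_setopt_bypass_query_analysis(crypt);
    }
}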
      + * + * @param crypt The @ref mongocrypt_t object to update + * @since 1.5 + */ + public static native void + mongocrypt_setopt_bypass_query_analysis (mongocrypt_t crypt); + + /** + * Set the contention factor used for explicit encryption. + * The contention factor is only used for indexed Queryable Encryption. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param contention_factor the contention factor + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status. + * @since 1.5 + */ + public static native boolean + mongocrypt_ctx_setopt_contention_factor (mongocrypt_ctx_t ctx, long contention_factor); + + /** + * Set the index key id to use for Queryable Encryption explicit encryption. + * + * If the index key id not set, the key id from @ref mongocrypt_ctx_setopt_key_id is used. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param key_id The binary corresponding to the _id (a UUID) of the data key to use from + * the key vault collection. Note, the UUID must be encoded with RFC-4122 byte order. + * The viewed data is copied. It is valid to destroy key_id with @ref mongocrypt_binary_destroy immediately after. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status + * @since 1.5 + */ + public static native boolean + mongocrypt_ctx_setopt_index_key_id (mongocrypt_ctx_t ctx, mongocrypt_binary_t key_id); + + /** + * Append an additional search directory to the search path for loading + * the crypt_shared dynamic library. + * + * @param crypt The @ref mongocrypt_t object to update + * @param path A null-terminated sequence of bytes for the search path. On + * some filesystems, this may be arbitrary bytes. On other filesystems, this may + * be required to be a valid UTF-8 code unit sequence. If the leading element of + * the path is the literal string "$ORIGIN", that substring will be replaced + * with the directory path containing the executable libmongocrypt module. If + * the path string is literal "$SYSTEM", then libmongocrypt will defer to the + * system's library resolution mechanism to find the crypt_shared library. + * + *

+ * If no crypt_shared dynamic library is found in any of the directories
+ * specified by the search paths loaded here, @ref mongocrypt_init() will still
+ * succeed and continue to operate without crypt_shared.
+ *
+ * The search paths are searched in the order that they are appended. This
+ * allows one to provide a precedence in how the library will be discovered. For
+ * example, appending known directories before appending "$SYSTEM" will allow
+ * one to supersede the system's installed library, but still fall-back to it if
+ * the library wasn't found otherwise. If one does not ever append "$SYSTEM",
+ * then the system's library-search mechanism will never be consulted.
+ *
+ * If an absolute path to the library is specified using @ref mongocrypt_setopt_set_crypt_shared_lib_path_override,
+ * then paths appended here will have no effect.
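To make the append-order precedence concrete, a minimal sketch; it assumes a caller-created mongocrypt_t handle, a hypothetical /opt/mongodb/lib directory, and placement in the com.mongodb.crypt.capi package, and it is illustrative rather than a prescribed configuration:

package com.mongodb.crypt.capi;

import com.mongodb.crypt.capi.CAPI.cstring;
import com.mongodb.crypt.capi.CAPI.mongocrypt_t;

final class CryptSharedSearchPathSketch {
    private CryptSharedSearchPathSketch() {
    }

    // Known directories are appended first so they take precedence;
    // "$SYSTEM" is appended last so the platform's own library resolution
    // remains the fallback.
    static void configureSearchPaths(final mongocrypt_t crypt) {
        CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path(crypt, new cstring("/opt/mongodb/lib"));
        CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path(crypt, new cstring("$ORIGIN"));
        CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path(crypt, new cstring("$SYSTEM"));
    }
}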

+ * @since 1.5
+ */
+ public static native void
+ mongocrypt_setopt_append_crypt_shared_lib_search_path (mongocrypt_t crypt, cstring path);
+
+ /**
+ * Set a single override path for loading the crypt_shared dynamic library.
+ * @param crypt The @ref mongocrypt_t object to update
+ * @param path A null-terminated sequence of bytes for a path to the crypt_shared
+ * dynamic library. On some filesystems, this may be arbitrary bytes. On other
+ * filesystems, this may be required to be a valid UTF-8 code unit sequence. If
+ * the leading element of the path is the literal string `$ORIGIN`, that
+ * substring will be replaced with the directory path containing the executable
+ * libmongocrypt module.
+ *

+ * This function will do no IO nor path validation. All validation will
+ * occur during the call to @ref mongocrypt_init.
+ *
+ * If a crypt_shared library path override is specified here, then no paths given
+ * to @ref mongocrypt_setopt_append_crypt_shared_lib_search_path will be consulted when
+ * opening the crypt_shared library.
+ *
+ * If a path is provided via this API and @ref mongocrypt_init fails to
+ * initialize a valid crypt_shared library instance for the path specified, then
+ * the initialization of mongocrypt_t will fail with an error.

      + * @since 1.5 + */ + public static native void + mongocrypt_setopt_set_crypt_shared_lib_path_override(mongocrypt_t crypt, cstring path); + + /** + * Set the query type to use for Queryable Encryption explicit encryption. + * The query type is only used for indexed Queryable Encryption. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param query_type the query type + * @param len the length + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status + */ + public static native boolean + mongocrypt_ctx_setopt_query_type (mongocrypt_ctx_t ctx, cstring query_type, int len); + + /** + * Set options for explicit encryption with the "range" algorithm. + * NOTE: "range" is currently unstable API and subject to backwards breaking changes. + * + * opts is a BSON document of the form: + * { + * "min": Optional<BSON value>, + * "max": Optional<BSON value>, + * "sparsity": Int64, + * "precision": Optional<Int32> + * "trimFactor": Optional<Int32> + * } + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param opts BSON. + * @return A boolean indicating success. If false, an error status is set. + * @since 1.7 + */ + public static native boolean + mongocrypt_ctx_setopt_algorithm_range (mongocrypt_ctx_t ctx, mongocrypt_binary_t opts); + + /** + * Initialize new @ref mongocrypt_t object. + * + * @param crypt The @ref mongocrypt_t object. + * @return A boolean indicating success. Failure may occur if previously set options are invalid. + */ + public static native boolean + mongocrypt_init(mongocrypt_t crypt); + + + /** + * Get the status associated with a @ref mongocrypt_t object. + * + * @param crypt The @ref mongocrypt_t object. + * @param status Receives the status. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_status(mongocrypt_t crypt, mongocrypt_status_t status); + + /** + * Returns true if libmongocrypt was built with native crypto support. + * + *

+ * If libmongocrypt was not built with native crypto support, setting crypto hooks is required.
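A minimal sketch of how a caller might act on that flag; it assumes same-package placement, and the hook-registration call itself is declared elsewhere in this class and is intentionally not reproduced here:

package com.mongodb.crypt.capi;

final class CryptoAvailabilitySketch {
    private CryptoAvailabilitySketch() {
    }

    // Fail fast when neither native crypto nor Java-side hooks
    // (e.g. the CipherCallback / MacCallback classes added by this patch) are in play.
    static void assertCryptoConfigured(final boolean javaHooksRegistered) {
        if (!CAPI.mongocrypt_is_crypto_available() && !javaHooksRegistered) {
            throw new MongoCryptException("libmongocrypt was built without native crypto support: "
                    + "crypto hooks must be registered before calling mongocrypt_init");
        }
    }
}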

      + * + * @return true if libmongocrypt was built with native crypto support + */ + public static native boolean + mongocrypt_is_crypto_available(); + + /** + * Destroy the @ref mongocrypt_t object. + * + * @param crypt The @ref mongocrypt_t object to destroy. + */ + public static native void + mongocrypt_destroy(mongocrypt_t crypt); + + /** + * Obtain a nul-terminated version string of the loaded crypt_shared dynamic library, + * if available. + * + * If no crypt_shared was successfully loaded, this function returns NULL. + * + * @param crypt The mongocrypt_t object after a successful call to mongocrypt_init. + * @param len an optional length of the returned string. May be NULL. + * + * @return A nul-terminated string of the dynamically loaded crypt_shared library. + * @since 1.5 + */ + public static native cstring + mongocrypt_crypt_shared_lib_version_string (mongocrypt_t crypt, Pointer len); + + /** + * Call in response to the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS state + * to set per-context KMS provider settings. These follow the same format + * as @ref mongocrypt_setopt_kms_providers. If no keys are present in the + * BSON input, the KMS provider settings configured for the @ref mongocrypt_t + * at initialization are used. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param kms_providers A BSON document mapping the KMS provider names + * to credentials. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status. + */ + public static native boolean + mongocrypt_ctx_provide_kms_providers (mongocrypt_ctx_t ctx, + mongocrypt_binary_t kms_providers); + + /** + * Set the key id to use for explicit encryption. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param key_id The key_id to use. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_setopt_key_id (mongocrypt_ctx_t ctx, + mongocrypt_binary_t key_id); + + /** + * Set the keyAltName to use for explicit encryption. + * keyAltName should be a binary encoding a bson document + * with the following format: { "keyAltName" : >BSON UTF8 value< } + * + *

+ * It is an error to set both this and the key id.

+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param key_alt_name The name to use.
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_key_alt_name (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t key_alt_name);
+
+ /**
+ * Set the keyMaterial to use for encrypting data.
+ *
+ * Pass the binary encoding of a BSON document like the following:
+ * { "keyMaterial" : (BSON BINARY value) }
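A minimal sketch of building that document with the helpers added elsewhere in this patch (CAPIHelper and BinaryHolder); it assumes same-package placement and a caller-supplied key material byte array:

package com.mongodb.crypt.capi;

import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t;

import org.bson.BsonBinary;
import org.bson.BsonDocument;

final class KeyMaterialSketch {
    private KeyMaterialSketch() {
    }

    // Encode { "keyMaterial": <bytes> } and hand it to the context before data key creation.
    static void setKeyMaterial(final mongocrypt_ctx_t ctx, final byte[] keyMaterial) {
        BsonDocument document = new BsonDocument("keyMaterial", new BsonBinary(keyMaterial));
        try (BinaryHolder holder = CAPIHelper.toBinary(document)) {
            if (!CAPI.mongocrypt_ctx_setopt_key_material(ctx, holder.getBinary())) {
                MongoCryptContextImpl.throwExceptionFromStatus(ctx);
            }
        }
    }
}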

      + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param key_material The data encryption key to use. The viewed data is + * copied. It is valid to destroy @p key_material with @ref + * mongocrypt_binary_destroy immediately after. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status + */ + public static native boolean + mongocrypt_ctx_setopt_key_material (mongocrypt_ctx_t ctx, mongocrypt_binary_t key_material); + + /** + * Set the algorithm used for encryption to either + * deterministic or random encryption. This value + * should only be set when using explicit encryption. + * + * If -1 is passed in for "len", then "algorithm" is + * assumed to be a null-terminated string. + * + * Valid values for algorithm are: + * "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + * "AEAD_AES_256_CBC_HMAC_SHA_512-Randomized" + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param algorithm A string specifying the algorithm to + * use for encryption. + * @param len The length of the algorithm string. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_setopt_algorithm (mongocrypt_ctx_t ctx, + cstring algorithm, + int len); + + + /** + * Create a new uninitialized @ref mongocrypt_ctx_t. + *

      + * Initialize the context with functions like @ref mongocrypt_ctx_encrypt_init. + * When done, destroy it with @ref mongocrypt_ctx_destroy. + * + * @param crypt The @ref mongocrypt_t object. + * @return A new context. + */ + public static native mongocrypt_ctx_t + mongocrypt_ctx_new(mongocrypt_t crypt); + + + /** + * Get the status associated with a @ref mongocrypt_ctx_t object. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param status Receives the status. + * @return A boolean indicating success. + */ + + public static native boolean + mongocrypt_ctx_status(mongocrypt_ctx_t ctx, mongocrypt_status_t status); + + + /** + * Identify the AWS KMS master key to use for creating a data key. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param region The AWS region. + * @param region_len The string length of @p region. Pass -1 to determine + * the string length with strlen (must be NULL terminated). + * @param cmk The Amazon Resource Name (ARN) of the customer master key + * (CMK). + * @param cmk_len The string length of @p cmk_len. Pass -1 to determine the + * string length with strlen (must be NULL terminated). + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_setopt_masterkey_aws (mongocrypt_ctx_t ctx, + cstring region, + int region_len, + cstring cmk, + int cmk_len); + + /** + * Identify a custom AWS endpoint when creating a data key. + * This is used internally to construct the correct HTTP request + * (with the Host header set to this endpoint). This endpoint + * is persisted in the new data key, and will be returned via + * mongocrypt_kms_ctx_endpoint. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param endpoint The endpoint. + * @param endpoint_len The string length of @p endpoint. Pass -1 to + * determine the string length with strlen (must be NULL terminated). + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status + */ + public static native boolean + mongocrypt_ctx_setopt_masterkey_aws_endpoint (mongocrypt_ctx_t ctx, + cstring endpoint, + int endpoint_len); + + + /** + * Set the master key to "local" for creating a data key. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_setopt_masterkey_local (mongocrypt_ctx_t ctx); + + /** + * Set key encryption key document for creating a data key. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param keyDocument BSON representing the key encryption key document. + * @return A boolean indicating success. If false, and error status is set. + * @since 1.1 + */ + public static native boolean + mongocrypt_ctx_setopt_key_encryption_key(mongocrypt_ctx_t ctx, + mongocrypt_binary_t keyDocument); + + /** + * Initialize a context to create a data key. + * + * Set options before using @ref mongocrypt_ctx_setopt_masterkey_aws and + * mongocrypt_ctx_setopt_masterkey_local. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @return A boolean indicating success. + * + * Assumes a master key option has been set, and an associated KMS provider + * has been set on the parent @ref mongocrypt_t. + */ + public static native boolean + mongocrypt_ctx_datakey_init (mongocrypt_ctx_t ctx); + + /** + * Initialize a context for encryption. + * + * Associated options: + * - @ref mongocrypt_ctx_setopt_cache_noblock + * - @ref mongocrypt_ctx_setopt_schema + * + * @param ctx The @ref mongocrypt_ctx_t object. 
+ * @param db The database name. + * @param db_len The byte length of @p db. Pass -1 to determine the string length with strlen (must be NULL terminated). + * @param cmd The BSON command to be encrypted. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status + */ + public static native boolean + mongocrypt_ctx_encrypt_init(mongocrypt_ctx_t ctx, + cstring db, + int db_len, + mongocrypt_binary_t cmd); + + /** + * Explicit helper method to encrypt a single BSON object. Contexts + * created for explicit encryption will not go through mongocryptd. + * + * To specify a key_id, algorithm, or iv to use, please use the + * corresponding mongocrypt_setopt methods before calling this. + * + * This method expects the passed-in BSON to be of the form: + * { "v" : BSON value to encrypt } + * + * @param ctx A @ref mongocrypt_ctx_t. + * @param msg A @ref mongocrypt_binary_t the plaintext BSON value. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_explicit_encrypt_init (mongocrypt_ctx_t ctx, + mongocrypt_binary_t msg); + + /** + * Explicit helper method to encrypt a Match Expression or Aggregate Expression. + * Contexts created for explicit encryption will not go through mongocryptd. + * Requires query_type to be "range". + * NOTE: "range" is currently unstable API and subject to backwards breaking changes. + * + * This method expects the passed-in BSON to be of the form: + * { "v" : FLE2RangeFindDriverSpec } + * + * FLE2RangeFindDriverSpec is a BSON document with one of these forms: + * + * 1. A Match Expression of this form: + * {$and: [{<field>: {<op>: <value1>, {<field>: {<op>: <value2> }}]} + * 2. An Aggregate Expression of this form: + * {$and: [{<op>: [<fieldpath>, <value1>]}, {<op>: [<fieldpath>, <value2>]}] + * + * may be $lt, $lte, $gt, or $gte. + * + * The value of "v" is expected to be the BSON value passed to a driver + * ClientEncryption.encryptExpression helper. + * + * Associated options for FLE 1: + * - @ref mongocrypt_ctx_setopt_key_id + * - @ref mongocrypt_ctx_setopt_key_alt_name + * - @ref mongocrypt_ctx_setopt_algorithm + * + * Associated options for Queryable Encryption: + * - @ref mongocrypt_ctx_setopt_key_id + * - @ref mongocrypt_ctx_setopt_index_key_id + * - @ref mongocrypt_ctx_setopt_contention_factor + * - @ref mongocrypt_ctx_setopt_query_type + * - @ref mongocrypt_ctx_setopt_algorithm_range + * + * An error is returned if FLE 1 and Queryable Encryption incompatible options + * are set. + * + * @param ctx A @ref mongocrypt_ctx_t. + * @param msg A @ref mongocrypt_binary_t the plaintext BSON value. + * @return A boolean indicating success. + * @since 1.7 + */ + public static native boolean + mongocrypt_ctx_explicit_encrypt_expression_init (mongocrypt_ctx_t ctx, + mongocrypt_binary_t msg); + + /** + * Initialize a context for decryption. + * + * @param ctx The mongocrypt_ctx_t object. + * @param doc The document to be decrypted. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_decrypt_init(mongocrypt_ctx_t ctx, mongocrypt_binary_t doc); + + + /** + * Explicit helper method to decrypt a single BSON object. + * + * @param ctx A @ref mongocrypt_ctx_t. + * @param msg A @ref mongocrypt_binary_t the encrypted BSON. + * @return A boolean indicating success. 
+ */ + public static native boolean + mongocrypt_ctx_explicit_decrypt_init (mongocrypt_ctx_t ctx, + mongocrypt_binary_t msg); + + /** + * Initialize a context to rewrap datakeys. + * + * Associated options {@link #mongocrypt_ctx_setopt_key_encryption_key(mongocrypt_ctx_t, mongocrypt_binary_t)} + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param filter The filter to use for the find command on the key vault collection to retrieve datakeys to rewrap. + * @return A boolean indicating success. If false, and error status is set. + * @since 1.5 + */ + public static native boolean + mongocrypt_ctx_rewrap_many_datakey_init (mongocrypt_ctx_t ctx, mongocrypt_binary_t filter); + + + public static final int MONGOCRYPT_CTX_ERROR = 0; + public static final int MONGOCRYPT_CTX_NEED_MONGO_COLLINFO = 1; /* run on main MongoClient */ + public static final int MONGOCRYPT_CTX_NEED_MONGO_MARKINGS = 2; /* run on mongocryptd. */ + public static final int MONGOCRYPT_CTX_NEED_MONGO_KEYS = 3; /* run on key vault */ + public static final int MONGOCRYPT_CTX_NEED_KMS = 4; + public static final int MONGOCRYPT_CTX_READY = 5; /* ready for encryption/decryption */ + public static final int MONGOCRYPT_CTX_DONE = 6; + public static final int MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS = 7; /* fetch/renew KMS credentials */ + + public static final int MONGOCRYPT_INDEX_TYPE_NONE = 1; + public static final int MONGOCRYPT_INDEX_TYPE_EQUALITY = 2; + public static final int MONGOCRYPT_QUERY_TYPE_EQUALITY = 1; + + /** + * Get the current state of a context. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @return A @ref mongocrypt_ctx_state_t. + */ + public static native int + mongocrypt_ctx_state(mongocrypt_ctx_t ctx); + + + /** + * Get BSON necessary to run the mongo operation when mongocrypt_ctx_t + * is in MONGOCRYPT_CTX_NEED_MONGO_* states. + * + *

+ * op_bson is a BSON document to be used for the operation.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_COLLINFO it is a listCollections filter.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_KEYS it is a find filter.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_MARKINGS it is a JSON schema to append.

+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param op_bson A BSON document for the MongoDB operation.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_mongo_op(mongocrypt_ctx_t ctx, mongocrypt_binary_t op_bson);
+
+
+ /**
+ * Feed a BSON reply or result when mongocrypt_ctx_t is in
+ * MONGOCRYPT_CTX_NEED_MONGO_* states. This may be called multiple times
+ * depending on the operation.
+ *
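Putting mongocrypt_ctx_mongo_op together with mongocrypt_ctx_mongo_feed and mongocrypt_ctx_mongo_done (declared just below), the exchange typically takes the following shape; this is a rough sketch that assumes same-package placement and leaves the actual server round trip to a caller-supplied function:

package com.mongodb.crypt.capi;

import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t;
import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t;

import org.bson.RawBsonDocument;

import java.util.List;
import java.util.function.Function;

final class NeedMongoStateSketch {
    private NeedMongoStateSketch() {
    }

    // `runAgainstServer` is a stand-in for the driver running the operation against
    // the cluster, the key vault or mongocryptd, depending on the current NEED_MONGO_* state.
    static void runStep(final mongocrypt_ctx_t ctx,
                        final Function<RawBsonDocument, List<RawBsonDocument>> runAgainstServer) {
        mongocrypt_binary_t opBson = CAPI.mongocrypt_binary_new();
        RawBsonDocument operation;
        try {
            if (!CAPI.mongocrypt_ctx_mongo_op(ctx, opBson)) {
                MongoCryptContextImpl.throwExceptionFromStatus(ctx);
            }
            operation = CAPIHelper.toDocument(opBson);
        } finally {
            CAPI.mongocrypt_binary_destroy(opBson);
        }
        // Feed every reply back, then signal that the operation is complete.
        for (RawBsonDocument reply : runAgainstServer.apply(operation)) {
            try (BinaryHolder holder = CAPIHelper.toBinary(reply)) {
                if (!CAPI.mongocrypt_ctx_mongo_feed(ctx, holder.getBinary())) {
                    MongoCryptContextImpl.throwExceptionFromStatus(ctx);
                }
            }
        }
        if (!CAPI.mongocrypt_ctx_mongo_done(ctx)) {
            MongoCryptContextImpl.throwExceptionFromStatus(ctx);
        }
    }
}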

      + * op_bson is a BSON document to be used for the operation. + * - For MONGOCRYPT_CTX_NEED_MONGO_COLLINFO it is a doc from a listCollections + * cursor. + * - For MONGOCRYPT_CTX_NEED_MONGO_KEYS it is a doc from a find cursor. + * - For MONGOCRYPT_CTX_NEED_MONGO_MARKINGS it is a reply from mongocryptd. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param reply A BSON document for the MongoDB operation. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_mongo_feed(mongocrypt_ctx_t ctx, mongocrypt_binary_t reply); + + + /** + * Call when done feeding the reply (or replies) back to the context. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @return A boolean indicating success. + */ + + public static native boolean + mongocrypt_ctx_mongo_done(mongocrypt_ctx_t ctx); + + /** + * Get the next KMS handle. + *

+ * Multiple KMS handles may be retrieved at once. Drivers may do this to fan
+ * out multiple concurrent KMS HTTP requests. Feeding multiple KMS requests
+ * is thread-safe.
+ *
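A rough sketch of servicing the handles synchronously with the functions declared below, assuming same-package placement; the TLS connection and HTTP I/O are elided behind a hypothetical readFromTlsSocket() stub, and a production driver would also check each boolean return and consult mongocrypt_kms_ctx_status:

package com.mongodb.crypt.capi;

import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t;
import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t;
import com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_t;
import com.sun.jna.ptr.PointerByReference;

import java.nio.ByteBuffer;

final class KmsRequestSketch {
    private KmsRequestSketch() {
    }

    static void serviceKmsRequests(final mongocrypt_ctx_t ctx) {
        mongocrypt_kms_ctx_t kms;
        while ((kms = CAPI.mongocrypt_ctx_next_kms_ctx(ctx)) != null) {
            mongocrypt_binary_t message = CAPI.mongocrypt_binary_new();
            try {
                CAPI.mongocrypt_kms_ctx_message(kms, message);      // HTTP request to send to the KMS
                PointerByReference endpoint = new PointerByReference();
                CAPI.mongocrypt_kms_ctx_endpoint(kms, endpoint);    // host to connect to over TLS
                // Connect to `endpoint`, send `message`, then feed the raw response bytes back
                // until the handle reports it needs nothing more.
                while (CAPI.mongocrypt_kms_ctx_bytes_needed(kms) > 0) {
                    ByteBuffer responseBytes = readFromTlsSocket();
                    try (BinaryHolder holder = CAPIHelper.toBinary(responseBytes)) {
                        CAPI.mongocrypt_kms_ctx_feed(kms, holder.getBinary());
                    }
                }
            } finally {
                CAPI.mongocrypt_binary_destroy(message);
            }
        }
        CAPI.mongocrypt_ctx_kms_done(ctx);
    }

    private static ByteBuffer readFromTlsSocket() {
        throw new UnsupportedOperationException("network I/O elided from this sketch");
    }
}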

      + * Is KMS handles are being handled synchronously, the driver can reuse the same + * TLS socket to send HTTP requests and receive responses. + * + * @param ctx A @ref mongocrypt_ctx_t. + * @return a new @ref mongocrypt_kms_ctx_t or NULL. + */ + public static native mongocrypt_kms_ctx_t + mongocrypt_ctx_next_kms_ctx(mongocrypt_ctx_t ctx); + + /** + * Get the KMS provider identifier associated with this KMS request. + * + * This is used to conditionally configure TLS connections based on the KMS + * request. It is useful for KMIP, which authenticates with a client + * certificate. + * + * @param kms The mongocrypt_kms_ctx_t object. + * @param len Receives the length of the returned string. + * + * @return The name of the KMS provider + */ + public static native cstring + mongocrypt_kms_ctx_get_kms_provider(mongocrypt_kms_ctx_t kms, + Pointer len); + + /** + * Get the HTTP request message for a KMS handle. + * + * @param kms A @ref mongocrypt_kms_ctx_t. + * @param msg The HTTP request to send to KMS. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_message(mongocrypt_kms_ctx_t kms, + mongocrypt_binary_t msg); + + /** + * Get the hostname from which to connect over TLS. + *

      + * The storage for @p endpoint is not owned by the caller, but + * is valid until calling @ref mongocrypt_ctx_kms_done on the + * parent @ref mongocrypt_ctx_t. + * + * @param kms A @ref mongocrypt_kms_ctx_t. + * @param endpoint The output hostname. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_endpoint(mongocrypt_kms_ctx_t kms, PointerByReference endpoint); + + /** + * Indicates how many bytes to feed into @ref mongocrypt_kms_ctx_feed. + * + * @param kms The @ref mongocrypt_kms_ctx_t. + * @return The number of requested bytes. + */ + public static native int + mongocrypt_kms_ctx_bytes_needed(mongocrypt_kms_ctx_t kms); + + + /** + * Feed bytes from the HTTP response. + *

      + * Feeding more bytes than what has been returned in @ref + * mongocrypt_kms_ctx_bytes_needed is an error. + * + * @param kms The @ref mongocrypt_kms_ctx_t. + * @param bytes The bytes to feed. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_feed(mongocrypt_kms_ctx_t kms, mongocrypt_binary_t bytes); + + + /** + * Get the status associated with a @ref mongocrypt_kms_ctx_t object. + * + * @param kms The @ref mongocrypt_kms_ctx_t object. + * @param status Receives the status. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_status(mongocrypt_kms_ctx_t kms, + mongocrypt_status_t status); + + + /** + * Call when done handling all KMS contexts. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_kms_done(mongocrypt_ctx_t ctx); + + + /** + * Perform the final encryption or decryption. + * + * @param ctx A @ref mongocrypt_ctx_t. + * @param out The final BSON to send to the server. + * @return a boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_finalize(mongocrypt_ctx_t ctx, mongocrypt_binary_t out); + + + /** + * Destroy and free all memory associated with a @ref mongocrypt_ctx_t. + * + * @param ctx A @ref mongocrypt_ctx_t. + */ + public static native void + mongocrypt_ctx_destroy(mongocrypt_ctx_t ctx); + + static final String NATIVE_LIBRARY_NAME = "mongocrypt"; + + static { + Native.register(CAPI.class, NATIVE_LIBRARY_NAME); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java new file mode 100644 index 00000000000..c1de63e8c8c --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.sun.jna.Pointer; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BasicOutputBuffer; + +import java.nio.ByteBuffer; + +import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_new_from_data; +import static java.lang.String.format; + +final class CAPIHelper { + + private static final CodecRegistry CODEC_REGISTRY = CodecRegistries.fromProviders(new BsonValueCodecProvider()); + + @SuppressWarnings("unchecked") + static BinaryHolder toBinary(final BsonDocument document) { + BasicOutputBuffer buffer = new BasicOutputBuffer(); + BsonBinaryWriter writer = new BsonBinaryWriter(buffer); + ((Codec) CODEC_REGISTRY.get(document.getClass())).encode(writer, document, EncoderContext.builder().build()); + + DisposableMemory memory = new DisposableMemory(buffer.size()); + memory.write(0, buffer.getInternalBuffer(), 0, buffer.size()); + + return new BinaryHolder(memory, mongocrypt_binary_new_from_data(memory, buffer.getSize())); + } + + static RawBsonDocument toDocument(final mongocrypt_binary_t binary) { + ByteBuffer byteBuffer = toByteBuffer(binary); + byte[] bytes = new byte[byteBuffer.remaining()]; + byteBuffer.get(bytes); + return new RawBsonDocument(bytes); + } + + static BinaryHolder toBinary(final ByteBuffer buffer) { + byte[] message = new byte[buffer.remaining()]; + buffer.get(message, 0, buffer.remaining()); + + DisposableMemory memory = new DisposableMemory(message.length); + memory.write(0, message, 0, message.length); + + return new BinaryHolder(memory, mongocrypt_binary_new_from_data(memory, message.length)); + } + + static ByteBuffer toByteBuffer(final mongocrypt_binary_t binary) { + Pointer pointer = binary.data(); + int length = binary.len(); + return pointer.getByteBuffer(0, length); + } + + static byte[] toByteArray(final mongocrypt_binary_t binary) { + ByteBuffer byteBuffer = toByteBuffer(binary); + byte[] byteArray = new byte[byteBuffer.remaining()]; + byteBuffer.get(byteArray); + return byteArray; + } + + static void writeByteArrayToBinary(final mongocrypt_binary_t binary, final byte[] bytes) { + if (binary.len() < bytes.length) { + throw new IllegalArgumentException(format("mongocrypt binary of length %d is not large enough to hold %d bytes", + binary.len(), bytes.length)); + } + Pointer outPointer = binary.data(); + outPointer.write(0, bytes, 0, bytes.length); + } + + private CAPIHelper() { + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java new file mode 100644 index 00000000000..b10c0f21c67 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.cstring; +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_crypto_fn; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import javax.crypto.Cipher; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.SecretKeySpec; +import java.security.NoSuchAlgorithmException; +import java.util.concurrent.ConcurrentLinkedDeque; + +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class CipherCallback implements mongocrypt_crypto_fn { + private final String algorithm; + private final String transformation; + private final int mode; + private final CipherPool cipherPool; + + CipherCallback(final String algorithm, final String transformation, final int mode) { + this.algorithm = algorithm; + this.transformation = transformation; + this.mode = mode; + this.cipherPool = new CipherPool(); + } + + @Override + public boolean crypt(final Pointer ctx, final mongocrypt_binary_t key, final mongocrypt_binary_t iv, + final mongocrypt_binary_t in, final mongocrypt_binary_t out, + final Pointer bytesWritten, final mongocrypt_status_t status) { + Cipher cipher = null; + try { + IvParameterSpec ivParameterSpec = new IvParameterSpec(toByteArray(iv)); + SecretKeySpec secretKeySpec = new SecretKeySpec(toByteArray(key), algorithm); + cipher = cipherPool.get(); + cipher.init(mode, secretKeySpec, ivParameterSpec); + + byte[] result = cipher.doFinal(toByteArray(in)); + writeByteArrayToBinary(out, result); + bytesWritten.setInt(0, result.length); + + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } finally { + if (cipher != null) { + cipherPool.release(cipher); + } + } + } + + private class CipherPool { + private final ConcurrentLinkedDeque available = new ConcurrentLinkedDeque<>(); + + Cipher get() throws NoSuchAlgorithmException, NoSuchPaddingException { + Cipher cipher = available.pollLast(); + if (cipher != null) { + return cipher; + } + return Cipher.getInstance(transformation); + } + + void release(final Cipher cipher) { + available.addLast(cipher); + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java new file mode 100644 index 00000000000..fdcfb268fea --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import com.sun.jna.Memory; + +// Subclass of JNA's Memory class so that we can call its protected dispose method +class DisposableMemory extends Memory { + DisposableMemory(final int size) { + super(size); + } + + public void dispose() { + super.dispose(); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java new file mode 100644 index 00000000000..9a53e850d15 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java @@ -0,0 +1,130 @@ + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import java.util.logging.Level; + +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.FINER; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; + +class JULLogger implements Logger { + + private final java.util.logging.Logger delegate; + + JULLogger(final String name) { + this.delegate = java.util.logging.Logger.getLogger(name); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public boolean isTraceEnabled() { + return isEnabled(FINER); + } + + @Override + public void trace(final String msg) { + log(FINER, msg); + } + + @Override + public void trace(final String msg, final Throwable t) { + log(FINER, msg, t); + } + + @Override + public boolean isDebugEnabled() { + return isEnabled(FINE); + } + + @Override + public void debug(final String msg) { + log(FINE, msg); + } + + @Override + public void debug(final String msg, final Throwable t) { + log(FINE, msg, t); + } + + @Override + public boolean isInfoEnabled() { + return delegate.isLoggable(INFO); + } + + @Override + public void info(final String msg) { + log(INFO, msg); + } + + @Override + public void info(final String msg, final Throwable t) { + log(INFO, msg, t); + } + + @Override + public boolean isWarnEnabled() { + return delegate.isLoggable(WARNING); + } + + @Override + public void warn(final String msg) { + log(WARNING, msg); + } + + @Override + public void warn(final String msg, final Throwable t) { + log(WARNING, msg, t); + } + + + @Override + public boolean isErrorEnabled() { + return delegate.isLoggable(SEVERE); + } + + @Override + public void error(final String msg) { + log(SEVERE, msg); + } + + @Override + public void error(final String msg, final Throwable t) { + log(SEVERE, msg, t); + } + + + private boolean 
isEnabled(final Level level) { + return delegate.isLoggable(level); + } + + private void log(final Level level, final String msg) { + delegate.log(level, msg); + } + + public void log(final Level level, final String msg, final Throwable t) { + delegate.log(level, msg, t); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java new file mode 100644 index 00000000000..38e82c235b8 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java @@ -0,0 +1,144 @@ + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +/** + * Not part of the public API + */ +public interface Logger { + /** + * Return the name of this Logger instance. + * + * @return name of this logger instance + */ + String getName(); + + /** + * Is the logger instance enabled for the TRACE level? + * + * @return True if this Logger is enabled for the TRACE level, false otherwise. + */ + boolean isTraceEnabled(); + + /** + * Log a message at the TRACE level. + * + * @param msg the message string to be logged + */ + void trace(String msg); + + /** + * Log an exception (throwable) at the TRACE level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void trace(String msg, Throwable t); + + /** + * Is the logger instance enabled for the DEBUG level? + * + * @return True if this Logger is enabled for the DEBUG level, false otherwise. + */ + boolean isDebugEnabled(); + + + /** + * Log a message at the DEBUG level. + * + * @param msg the message string to be logged + */ + void debug(String msg); + + + /** + * Log an exception (throwable) at the DEBUG level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void debug(String msg, Throwable t); + + /** + * Is the logger instance enabled for the INFO level? + * + * @return True if this Logger is enabled for the INFO level, false otherwise. + */ + boolean isInfoEnabled(); + + + /** + * Log a message at the INFO level. + * + * @param msg the message string to be logged + */ + void info(String msg); + + /** + * Log an exception (throwable) at the INFO level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void info(String msg, Throwable t); + + /** + * Is the logger instance enabled for the WARN level? + * + * @return True if this Logger is enabled for the WARN level, false otherwise. + */ + boolean isWarnEnabled(); + + /** + * Log a message at the WARN level. + * + * @param msg the message string to be logged + */ + void warn(String msg); + + /** + * Log an exception (throwable) at the WARN level with an accompanying message. 
+ * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void warn(String msg, Throwable t); + + /** + * Is the logger instance enabled for the ERROR level? + * + * @return True if this Logger is enabled for the ERROR level, false otherwise. + */ + boolean isErrorEnabled(); + + /** + * Log a message at the ERROR level. + * + * @param msg the message string to be logged + */ + void error(String msg); + + /** + * Log an exception (throwable) at the ERROR level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void error(String msg, Throwable t); +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java new file mode 100644 index 00000000000..c57cd3994e4 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +/** + * This class is not part of the public API. + */ +public final class Loggers { + private static final String NAME = "org.mongodb.driver.crypt"; + + private static final boolean USE_SLF4J = shouldUseSLF4J(); + + /** + * @return the logger + */ + public static Logger getLogger() { + if (USE_SLF4J) { + return new SLF4JLogger(NAME); + } else { + return new JULLogger(NAME); + } + } + + private Loggers() { + } + + private static boolean shouldUseSLF4J() { + try { + Class.forName("org.slf4j.Logger"); + return true; + } catch (ClassNotFoundException e) { + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java new file mode 100644 index 00000000000..2ea09550bb4 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.cstring; +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_hmac_fn; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class MacCallback implements mongocrypt_hmac_fn { + private final String algorithm; + + MacCallback(final String algorithm) { + this.algorithm = algorithm; + } + + @Override + public boolean hmac(final Pointer ctx, final mongocrypt_binary_t key, final mongocrypt_binary_t in, + final mongocrypt_binary_t out, final mongocrypt_status_t status) { + try { + Mac mac = Mac.getInstance(algorithm); + SecretKeySpec keySpec = new SecretKeySpec(toByteArray(key), algorithm); + mac.init(keySpec); + + mac.update(toByteArray(in)); + + byte[] result = mac.doFinal(); + writeByteArrayToBinary(out, result); + + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java new file mode 100644 index 00000000000..861290d0a8f --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.cstring; +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_hash_fn; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import java.security.MessageDigest; + +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class MessageDigestCallback implements mongocrypt_hash_fn { + + private final String algorithm; + + MessageDigestCallback(final String algorithm) { + this.algorithm = algorithm; + } + + @Override + public boolean hash(final Pointer ctx, final mongocrypt_binary_t in, final mongocrypt_binary_t out, + final mongocrypt_status_t status) { + try { + MessageDigest messageDigest = MessageDigest.getInstance(algorithm); + messageDigest.update(toByteArray(in)); + byte[] digest = messageDigest.digest(); + writeByteArrayToBinary(out, digest); + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java new file mode 100644 index 00000000000..4824197510d --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import static org.bson.assertions.Assertions.notNull; + +/** + * The options for configuring the AWS KMS provider. + */ +public final class MongoAwsKmsProviderOptions { + + private final String accessKeyId; + private final String secretAccessKey; + + /** + * Construct a builder for the options + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the access key id + * + * @return the access key id, which may not be null + */ + public String getAccessKeyId() { + return accessKeyId; + } + + /** + * Gets the secret access key + * + * @return the secret access key, which may not be null + */ + public String getSecretAccessKey() { + return secretAccessKey; + } + + + /** + * The builder for the options + */ + public static final class Builder { + private String accessKeyId; + private String secretAccessKey; + + private Builder() { + } + + /** + * Sets the access key id. + * + * @param accessKeyId the access key id + * @return this + */ + public Builder accessKeyId(final String accessKeyId) { + this.accessKeyId = accessKeyId; + return this; + } + + /** + * Sets the secret access key. 
+ * + * @param secretAccessKey the secret access key + * @return this + */ + public Builder secretAccessKey(final String secretAccessKey) { + this.secretAccessKey = secretAccessKey; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoAwsKmsProviderOptions build() { + return new MongoAwsKmsProviderOptions(this); + } + } + + private MongoAwsKmsProviderOptions(final Builder builder) { + this.accessKeyId = notNull("AWS KMS provider accessKeyId", builder.accessKeyId); + this.secretAccessKey = notNull("AWS KMS provider secretAccessKey", builder.secretAccessKey); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java new file mode 100644 index 00000000000..74816dbe42c --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + + +package com.mongodb.crypt.capi; + +import org.bson.BsonDocument; + +import java.io.Closeable; + +/** + * A context for encryption/decryption operations. + */ +public interface MongoCrypt extends Closeable { + + /** + * Create a context to use for encryption + * + * @param database the namespace + * @param command the document representing the command to encrypt + * @return the context + */ + MongoCryptContext createEncryptionContext(String database, BsonDocument command); + + /** + * Create a context to use for decryption + * + * @param document the document to decrypt + * @return the context + */ + MongoCryptContext createDecryptionContext(BsonDocument document); + + /** + * Create a context to use for creating a data key + * @param kmsProvider the KMS provider + * @param options the data key options + * @return the context + */ + MongoCryptContext createDataKeyContext(String kmsProvider, MongoDataKeyOptions options); + + /** + * Create a context to use for encryption + * + * @param document the document to encrypt, which must be in the form { "v" : BSON value to encrypt } + * @param options the explicit encryption options + * @return the context + */ + MongoCryptContext createExplicitEncryptionContext(BsonDocument document, MongoExplicitEncryptOptions options); + + /** + * Create a context to use for encryption + * + * @param document the document to encrypt, which must be in the form { "v" : BSON value to encrypt } + * @param options the expression encryption options + * @return the context + * @since 1.7 + */ + MongoCryptContext createEncryptExpressionContext(BsonDocument document, MongoExplicitEncryptOptions options); + + /** + * Create a context to use for encryption + * + * @param document the document to decrypt,which must be in the form { "v" : encrypted BSON value } + * @return the context + */ + MongoCryptContext createExplicitDecryptionContext(BsonDocument document); + + /** + * Create a context to use for encryption + * + * @param filter The filter to use for the find command 
on the key vault collection to retrieve datakeys to rewrap. + * @param options the rewrap many data key options + * @return the context + * @since 1.5 + */ + MongoCryptContext createRewrapManyDatakeyContext(BsonDocument filter, MongoRewrapManyDataKeyOptions options); + + /** + * @return the version string of the loaded crypt shared dynamic library if available or null + * @since 1.5 + */ + String getCryptSharedLibVersionString(); + + @Override + void close(); +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java new file mode 100644 index 00000000000..2c3aa250b87 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java @@ -0,0 +1,137 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; + +import java.io.Closeable; + +/** + * An interface representing the lifecycle of an encryption or decryption request. It's modelled as a state machine. + */ +public interface MongoCryptContext extends Closeable { + + /** + * The possible states. + */ + enum State { + /** + * Needs collection information from the cluster encrypting to + */ + NEED_MONGO_COLLINFO(CAPI.MONGOCRYPT_CTX_NEED_MONGO_COLLINFO), + + /** + * Need to mark command with encryption markers + */ + NEED_MONGO_MARKINGS(CAPI.MONGOCRYPT_CTX_NEED_MONGO_MARKINGS), + + /** + * Need keys from the key vault + */ + NEED_MONGO_KEYS(CAPI.MONGOCRYPT_CTX_NEED_MONGO_KEYS), + + /** + * Need the key management service + */ + NEED_KMS(CAPI.MONGOCRYPT_CTX_NEED_KMS), + + /** + * Need to fetch/renew KMS credentials + * @since 1.4 + */ + NEED_KMS_CREDENTIALS(CAPI.MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS), + + /** + * Ready for encryption/decryption + */ + READY(CAPI.MONGOCRYPT_CTX_READY), + + /** + * Done + */ + DONE(CAPI.MONGOCRYPT_CTX_DONE); + + private final int index; + + State(final int index) { + this.index = index; + } + + static State fromIndex(final int index) { + for (State state : State.values()) { + if (state.index == index) { + return state; + } + } + throw new MongoCryptException("Unknown context state " + index); + } + } + + /** + * Gets the current state. 
+ * + * @return the current state + */ + State getState(); + + /** + * + * @return the operation to execute + */ + RawBsonDocument getMongoOperation(); + + /** + * + * @param document a result of the operation + */ + void addMongoOperationResult(BsonDocument document); + + /** + * Signal completion of the operation + */ + void completeMongoOperation(); + + /** + * Provide KMS credentials on demand, in response to NEED_KMS_CREDENTIALS state + * + * @param credentialsDocument document containing all credentials + * @since 1.4 + */ + void provideKmsProviderCredentials(BsonDocument credentialsDocument); + + /** + * + * @return the next key decryptor, or null if there are no more + */ + MongoKeyDecryptor nextKeyDecryptor(); + + /** + * Indicate that all key decryptors have been completed + */ + void completeKeyDecryptors(); + + /** + * + * @return the encrypted or decrypted document + */ + RawBsonDocument finish(); + + @Override + void close(); +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java new file mode 100644 index 00000000000..34aaafe7344 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_t; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; + +import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_new; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_finalize; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_kms_done; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_mongo_done; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_mongo_feed; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_mongo_op; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_next_kms_ctx; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_provide_kms_providers; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_state; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_status; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import static com.mongodb.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.crypt.capi.CAPIHelper.toDocument; +import static org.bson.assertions.Assertions.isTrue; +import static org.bson.assertions.Assertions.notNull; + +class MongoCryptContextImpl implements MongoCryptContext { + private final mongocrypt_ctx_t wrapped; + private volatile boolean closed; + + MongoCryptContextImpl(final mongocrypt_ctx_t wrapped) { + notNull("wrapped", wrapped); + this.wrapped = wrapped; + } + + @Override + public State getState() { + isTrue("open", !closed); + return State.fromIndex(mongocrypt_ctx_state(wrapped)); + } + + @Override + public RawBsonDocument getMongoOperation() { + isTrue("open", !closed); + mongocrypt_binary_t binary = mongocrypt_binary_new(); + + try { + boolean success = mongocrypt_ctx_mongo_op(wrapped, binary); + if (!success) { + throwExceptionFromStatus(); + } + return toDocument(binary); + } finally { + mongocrypt_binary_destroy(binary); + } + } + + @Override + public void addMongoOperationResult(final BsonDocument document) { + isTrue("open", !closed); + + try (BinaryHolder binaryHolder = toBinary(document)) { + boolean success = mongocrypt_ctx_mongo_feed(wrapped, binaryHolder.getBinary()); + if (!success) { + throwExceptionFromStatus(); + } + } + } + + @Override + public void completeMongoOperation() { + isTrue("open", !closed); + boolean success = mongocrypt_ctx_mongo_done(wrapped); + if (!success) { + throwExceptionFromStatus(); + } + } + + @Override + public void provideKmsProviderCredentials(final BsonDocument credentialsDocument) { + try (BinaryHolder binaryHolder = toBinary(credentialsDocument)) { + boolean success = mongocrypt_ctx_provide_kms_providers(wrapped, binaryHolder.getBinary()); + if (!success) { + throwExceptionFromStatus(); + } + } + } + + @Override + public MongoKeyDecryptor nextKeyDecryptor() { + isTrue("open", !closed); + + mongocrypt_kms_ctx_t kmsContext = mongocrypt_ctx_next_kms_ctx(wrapped); + if (kmsContext == null) { + return null; + } + return new MongoKeyDecryptorImpl(kmsContext); + } + + @Override + public void completeKeyDecryptors() { + isTrue("open", !closed); + + boolean success = mongocrypt_ctx_kms_done(wrapped); + if (!success) { + 
throwExceptionFromStatus(); + } + + } + + @Override + public RawBsonDocument finish() { + isTrue("open", !closed); + + mongocrypt_binary_t binary = mongocrypt_binary_new(); + + try { + boolean success = mongocrypt_ctx_finalize(wrapped, binary); + if (!success) { + throwExceptionFromStatus(); + } + return toDocument(binary); + } finally { + mongocrypt_binary_destroy(binary); + } + } + + @Override + public void close() { + mongocrypt_ctx_destroy(wrapped); + closed = true; + } + + static void throwExceptionFromStatus(final mongocrypt_ctx_t wrapped) { + mongocrypt_status_t status = mongocrypt_status_new(); + mongocrypt_ctx_status(wrapped, status); + MongoCryptException e = new MongoCryptException(status); + mongocrypt_status_destroy(status); + throw e; + } + + private void throwExceptionFromStatus() { + throwExceptionFromStatus(wrapped); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java new file mode 100644 index 00000000000..63074e20bc9 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + + +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; + +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_code; +import static org.bson.assertions.Assertions.isTrue; + +/** + * Top level Exception for all Mongo Crypt CAPI exceptions + */ +public class MongoCryptException extends RuntimeException { + private static final long serialVersionUID = -5524416583514807953L; + private final int code; + + /** + * @param msg the message + */ + public MongoCryptException(final String msg) { + super(msg); + this.code = -1; + } + + /** + * @param msg the message + * @param cause the cause + */ + public MongoCryptException(final String msg, final Throwable cause) { + super(msg, cause); + this.code = -1; + } + + /** + * Construct an instance from a {@code mongocrypt_status_t}. + * + * @param status the status + */ + MongoCryptException(final mongocrypt_status_t status) { + super(CAPI.mongocrypt_status_message(status, null).toString()); + isTrue("status not ok", !CAPI.mongocrypt_status_ok(status)); + code = mongocrypt_status_code(status); + } + + /** + * @return the error code for the exception. + */ + public int getCode() { + return code; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java new file mode 100644 index 00000000000..2949e2a11e4 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java @@ -0,0 +1,423 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.cstring; +import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_log_fn_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_t; +import com.sun.jna.Pointer; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import javax.crypto.Cipher; +import java.nio.ByteBuffer; +import java.security.SecureRandom; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; + +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_ERROR; +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_FATAL; +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_INFO; +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_TRACE; +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_WARNING; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_crypt_shared_lib_version_string; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_datakey_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_decrypt_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_encrypt_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_explicit_decrypt_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_explicit_encrypt_expression_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_explicit_encrypt_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_new; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_rewrap_many_datakey_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm_range; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_contention_factor; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_alt_name; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_encryption_key; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_id; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_material; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_query_type; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_init; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_is_crypto_available; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_new; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_aes_256_ctr; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_bypass_query_analysis; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_crypto_hooks; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_encrypted_field_config_map; +import static 
com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_aws; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_local; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_kms_providers; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_log_handler; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_schema_map; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_set_crypt_shared_lib_path_override; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_use_need_kms_credentials_state; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.crypt.capi.CAPIHelper.toBinary; +import static org.bson.assertions.Assertions.isTrue; +import static org.bson.assertions.Assertions.notNull; + +class MongoCryptImpl implements MongoCrypt { + private static final Logger LOGGER = Loggers.getLogger(); + private final mongocrypt_t wrapped; + + // Keep a strong reference to all the callbacks so that they don't get garbage collected + @SuppressWarnings("FieldCanBeLocal") + private final LogCallback logCallback; + + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCBC256EncryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCBC256DecryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCTR256EncryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCTR256DecryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final MacCallback hmacSha512Callback; + @SuppressWarnings("FieldCanBeLocal") + private final MacCallback hmacSha256Callback; + @SuppressWarnings("FieldCanBeLocal") + private final MessageDigestCallback sha256Callback; + @SuppressWarnings("FieldCanBeLocal") + private final SecureRandomCallback secureRandomCallback; + @SuppressWarnings("FieldCanBeLocal") + private final SigningRSAESPKCSCallback signingRSAESPKCSCallback; + + private final AtomicBoolean closed; + + MongoCryptImpl(final MongoCryptOptions options) { + closed = new AtomicBoolean(); + wrapped = mongocrypt_new(); + if (wrapped == null) { + throw new MongoCryptException("Unable to create new mongocrypt object"); + } + + logCallback = new LogCallback(); + + configure(() -> mongocrypt_setopt_log_handler(wrapped, logCallback, null)); + + if (mongocrypt_is_crypto_available()) { + LOGGER.debug("libmongocrypt is compiled with cryptography support, so not registering Java callbacks"); + aesCBC256EncryptCallback = null; + aesCBC256DecryptCallback = null; + aesCTR256EncryptCallback = null; + aesCTR256DecryptCallback = null; + hmacSha512Callback = null; + hmacSha256Callback = null; + sha256Callback = null; + secureRandomCallback = null; + signingRSAESPKCSCallback = null; + } else { + LOGGER.debug("libmongocrypt is compiled without cryptography support, so registering Java callbacks"); + // We specify NoPadding here because the underlying C library is responsible for padding prior + // to executing the callback + aesCBC256EncryptCallback = new CipherCallback("AES", "AES/CBC/NoPadding", Cipher.ENCRYPT_MODE); + aesCBC256DecryptCallback = new CipherCallback("AES", "AES/CBC/NoPadding", Cipher.DECRYPT_MODE); + aesCTR256EncryptCallback = new CipherCallback("AES", "AES/CTR/NoPadding", Cipher.ENCRYPT_MODE); + aesCTR256DecryptCallback = new CipherCallback("AES", "AES/CTR/NoPadding", Cipher.DECRYPT_MODE); + 
+ hmacSha512Callback = new MacCallback("HmacSHA512"); + hmacSha256Callback = new MacCallback("HmacSHA256"); + sha256Callback = new MessageDigestCallback("SHA-256"); + secureRandomCallback = new SecureRandomCallback(new SecureRandom()); + + configure(() -> mongocrypt_setopt_crypto_hooks(wrapped, aesCBC256EncryptCallback, aesCBC256DecryptCallback, + secureRandomCallback, hmacSha512Callback, hmacSha256Callback, + sha256Callback, null)); + + signingRSAESPKCSCallback = new SigningRSAESPKCSCallback(); + configure(() -> mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5(wrapped, signingRSAESPKCSCallback, null)); + configure(() -> mongocrypt_setopt_aes_256_ctr(wrapped, aesCTR256EncryptCallback, aesCTR256DecryptCallback, null)); + } + + if (options.getLocalKmsProviderOptions() != null) { + try (BinaryHolder localMasterKeyBinaryHolder = toBinary(options.getLocalKmsProviderOptions().getLocalMasterKey())) { + configure(() -> mongocrypt_setopt_kms_provider_local(wrapped, localMasterKeyBinaryHolder.getBinary())); + } + } + + if (options.getAwsKmsProviderOptions() != null) { + configure(() -> mongocrypt_setopt_kms_provider_aws(wrapped, + new cstring(options.getAwsKmsProviderOptions().getAccessKeyId()), -1, + new cstring(options.getAwsKmsProviderOptions().getSecretAccessKey()), -1)); + } + + if (options.isNeedsKmsCredentialsStateEnabled()) { + mongocrypt_setopt_use_need_kms_credentials_state(wrapped); + } + + if (options.getKmsProviderOptions() != null) { + try (BinaryHolder binaryHolder = toBinary(options.getKmsProviderOptions())) { + configure(() -> mongocrypt_setopt_kms_providers(wrapped, binaryHolder.getBinary())); + } + } + + if (options.getLocalSchemaMap() != null) { + BsonDocument localSchemaMapDocument = new BsonDocument(); + localSchemaMapDocument.putAll(options.getLocalSchemaMap()); + + try (BinaryHolder localSchemaMapBinaryHolder = toBinary(localSchemaMapDocument)) { + configure(() -> mongocrypt_setopt_schema_map(wrapped, localSchemaMapBinaryHolder.getBinary())); + } + } + + if (options.isBypassQueryAnalysis()) { + mongocrypt_setopt_bypass_query_analysis(wrapped); + } + + if (options.getEncryptedFieldsMap() != null) { + BsonDocument localEncryptedFieldsMap = new BsonDocument(); + localEncryptedFieldsMap.putAll(options.getEncryptedFieldsMap()); + + try (BinaryHolder localEncryptedFieldsMapHolder = toBinary(localEncryptedFieldsMap)) { + configure(() -> mongocrypt_setopt_encrypted_field_config_map(wrapped, localEncryptedFieldsMapHolder.getBinary())); + } + } + + options.getSearchPaths().forEach(p -> mongocrypt_setopt_append_crypt_shared_lib_search_path(wrapped, new cstring(p))); + if (options.getExtraOptions().containsKey("cryptSharedLibPath")) { + mongocrypt_setopt_set_crypt_shared_lib_path_override(wrapped, new cstring(options.getExtraOptions().getString("cryptSharedLibPath").getValue())); + } + + configure(() -> mongocrypt_init(wrapped)); + } + + @Override + public MongoCryptContext createEncryptionContext(final String database, final BsonDocument commandDocument) { + isTrue("open", !closed.get()); + notNull("database", database); + notNull("commandDocument", commandDocument); + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if (context == null) { + throwExceptionFromStatus(); + } + + try (BinaryHolder commandDocumentBinaryHolder = toBinary(commandDocument)) { + configure(() -> mongocrypt_ctx_encrypt_init(context, new cstring(database), -1, + commandDocumentBinaryHolder.getBinary()), context); + return new MongoCryptContextImpl(context); + } + } + + @Override + public 
MongoCryptContext createDecryptionContext(final BsonDocument document) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if (context == null) { + throwExceptionFromStatus(); + } + try (BinaryHolder documentBinaryHolder = toBinary(document)){ + configure(() -> mongocrypt_ctx_decrypt_init(context, documentBinaryHolder.getBinary()), context); + } + return new MongoCryptContextImpl(context); + } + + @Override + public MongoCryptContext createDataKeyContext(final String kmsProvider, final MongoDataKeyOptions options) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if (context == null) { + throwExceptionFromStatus(); + } + + BsonDocument keyDocument = new BsonDocument("provider", new BsonString(kmsProvider)); + BsonDocument masterKey = options.getMasterKey(); + if (masterKey != null) { + masterKey.forEach(keyDocument::append); + } + try (BinaryHolder masterKeyHolder = toBinary(keyDocument)) { + configure(() -> mongocrypt_ctx_setopt_key_encryption_key(context, masterKeyHolder.getBinary()), context); + } + + if (options.getKeyAltNames() != null) { + for (String cur : options.getKeyAltNames()) { + try (BinaryHolder keyAltNameBinaryHolder = toBinary(new BsonDocument("keyAltName", new BsonString(cur)))) { + configure(() -> mongocrypt_ctx_setopt_key_alt_name(context, keyAltNameBinaryHolder.getBinary()), context); + } + } + } + + if (options.getKeyMaterial() != null) { + try (BinaryHolder keyMaterialBinaryHolder = toBinary(new BsonDocument("keyMaterial", new BsonBinary(options.getKeyMaterial())))) { + configure(() -> mongocrypt_ctx_setopt_key_material(context, keyMaterialBinaryHolder.getBinary()), context); + } + } + + if (!mongocrypt_ctx_datakey_init(context)) { + MongoCryptContextImpl.throwExceptionFromStatus(context); + } + return new MongoCryptContextImpl(context); + } + + @Override + public MongoCryptContext createExplicitEncryptionContext(final BsonDocument document, final MongoExplicitEncryptOptions options) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = configureExplicitEncryption(options); + + try (BinaryHolder documentBinaryHolder = toBinary(document)) { + configure(() -> mongocrypt_ctx_explicit_encrypt_init(context, documentBinaryHolder.getBinary()), context); + } + + return new MongoCryptContextImpl(context); + } + + @Override + public MongoCryptContext createEncryptExpressionContext(final BsonDocument document, final MongoExplicitEncryptOptions options) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = configureExplicitEncryption(options); + + try (BinaryHolder documentBinaryHolder = toBinary(document)) { + configure(() -> mongocrypt_ctx_explicit_encrypt_expression_init(context, documentBinaryHolder.getBinary()), context); + } + return new MongoCryptContextImpl(context); + } + + @Override + public MongoCryptContext createExplicitDecryptionContext(final BsonDocument document) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if (context == null) { + throwExceptionFromStatus(); + } + try (BinaryHolder binaryHolder = toBinary(document)) { + configure(() -> mongocrypt_ctx_explicit_decrypt_init(context, binaryHolder.getBinary()), context); + } + return new MongoCryptContextImpl(context); + } + + @Override + public MongoCryptContext createRewrapManyDatakeyContext(final BsonDocument filter, final MongoRewrapManyDataKeyOptions options) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if 
(context == null) { + throwExceptionFromStatus(); + } + + if (options != null && options.getProvider() != null) { + BsonDocument keyDocument = new BsonDocument("provider", new BsonString(options.getProvider())); + BsonDocument masterKey = options.getMasterKey(); + if (masterKey != null) { + masterKey.forEach(keyDocument::append); + } + try (BinaryHolder binaryHolder = toBinary(keyDocument)) { + configure(() -> mongocrypt_ctx_setopt_key_encryption_key(context, binaryHolder.getBinary()), context); + } + } + + try (BinaryHolder binaryHolder = toBinary(filter)) { + configure(() -> mongocrypt_ctx_rewrap_many_datakey_init(context, binaryHolder.getBinary()), context); + } + return new MongoCryptContextImpl(context); + } + + @Override + public String getCryptSharedLibVersionString() { + cstring versionString = mongocrypt_crypt_shared_lib_version_string(wrapped, null); + return versionString == null ? null : versionString.toString(); + } + + @Override + public void close() { + if (!closed.getAndSet(true)) { + mongocrypt_destroy(wrapped); + } + } + + private mongocrypt_ctx_t configureExplicitEncryption(final MongoExplicitEncryptOptions options) { + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if (context == null) { + throwExceptionFromStatus(); + } + + if (options.getKeyId() != null) { + try (BinaryHolder keyIdBinaryHolder = toBinary(ByteBuffer.wrap(options.getKeyId().getData()))) { + configure(() -> mongocrypt_ctx_setopt_key_id(context, keyIdBinaryHolder.getBinary()), context); + } + } else if (options.getKeyAltName() != null) { + try (BinaryHolder keyAltNameBinaryHolder = toBinary(new BsonDocument("keyAltName", new BsonString(options.getKeyAltName())))) { + configure(() -> mongocrypt_ctx_setopt_key_alt_name(context, keyAltNameBinaryHolder.getBinary()), context); + } + } + + if (options.getAlgorithm() != null) { + configure(() -> mongocrypt_ctx_setopt_algorithm(context, new cstring(options.getAlgorithm()), -1), context); + } + if (options.getQueryType() != null) { + configure(() -> mongocrypt_ctx_setopt_query_type(context, new cstring(options.getQueryType()), -1), context); + } + if (options.getContentionFactor() != null) { + configure(() -> mongocrypt_ctx_setopt_contention_factor(context, options.getContentionFactor()), context); + } + if (options.getRangeOptions() != null) { + try (BinaryHolder rangeOptionsHolder = toBinary(options.getRangeOptions())) { + configure(() -> mongocrypt_ctx_setopt_algorithm_range(context, rangeOptionsHolder.getBinary()), context); + } + } + return context; + } + + + private void configure(final Supplier successSupplier) { + if (!successSupplier.get()) { + throwExceptionFromStatus(); + } + } + + private void configure(final Supplier successSupplier, final mongocrypt_ctx_t context) { + if (!successSupplier.get()) { + MongoCryptContextImpl.throwExceptionFromStatus(context); + } + } + + private void throwExceptionFromStatus() { + mongocrypt_status_t status = mongocrypt_status_new(); + mongocrypt_status(wrapped, status); + MongoCryptException e = new MongoCryptException(status); + mongocrypt_status_destroy(status); + throw e; + } + + static class LogCallback implements mongocrypt_log_fn_t { + @Override + public void log(final int level, final cstring message, final int messageLength, final Pointer ctx) { + if (level == MONGOCRYPT_LOG_LEVEL_FATAL) { + LOGGER.error(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_ERROR) { + LOGGER.error(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_WARNING) { + LOGGER.warn(message.toString()); + } + if 
(level == MONGOCRYPT_LOG_LEVEL_INFO) { + LOGGER.info(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_TRACE) { + LOGGER.trace(message.toString()); + } + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java new file mode 100644 index 00000000000..dc65bbdd9ae --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java @@ -0,0 +1,284 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import org.bson.BsonDocument; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; +import static org.bson.assertions.Assertions.isTrue; + +/** + * The options for configuring MongoCrypt. + */ +public final class MongoCryptOptions { + + private final MongoAwsKmsProviderOptions awsKmsProviderOptions; + private final MongoLocalKmsProviderOptions localKmsProviderOptions; + private final BsonDocument kmsProviderOptions; + private final Map localSchemaMap; + private final boolean needsKmsCredentialsStateEnabled; + private final Map encryptedFieldsMap; + private final BsonDocument extraOptions; + private final boolean bypassQueryAnalysis; + private final List searchPaths; + + + /** + * Construct a builder for the options + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the AWS KMS provider options. + * + * @return the AWS KMS provider options, which may be null + */ + public MongoAwsKmsProviderOptions getAwsKmsProviderOptions() { + return awsKmsProviderOptions; + } + + /** + * Gets the local KMS provider options. + * + * @return the local KMS provider options, which may be null + */ + public MongoLocalKmsProviderOptions getLocalKmsProviderOptions() { + return localKmsProviderOptions; + } + + /** + * Returns the KMS provider options. + * + * @return the KMS provider options, which may be null + * @since 1.1 + */ + public BsonDocument getKmsProviderOptions() { + return kmsProviderOptions; + } + + /** + * Gets the local schema map. + * + * @return the local schema map + */ + public Map getLocalSchemaMap() { + return localSchemaMap; + } + + /** + * Gets whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled. Defaults to false + * + * @return whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled + * @since 1.4 + */ + public boolean isNeedsKmsCredentialsStateEnabled() { + return needsKmsCredentialsStateEnabled; + } + + /** + * Gets the encrypted fields map. + * + * @since 1.5 + * @return the encrypted fields map + */ + public Map getEncryptedFieldsMap() { + return encryptedFieldsMap; + } + + /** + * Gets whether automatic analysis of outgoing commands should be disabled. 
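
As a usage sketch (illustrative only, not part of the patch): an instance of these options is assembled with the Builder below and handed to MongoCrypts.create, which is introduced later in this patch. The all-zero 96-byte local master key is a placeholder, not real key material.

    import com.mongodb.crypt.capi.MongoCrypt;
    import com.mongodb.crypt.capi.MongoCryptOptions;
    import com.mongodb.crypt.capi.MongoCrypts;
    import com.mongodb.crypt.capi.MongoLocalKmsProviderOptions;

    import java.nio.ByteBuffer;

    final class LocalKmsExample {
        static MongoCrypt createMongoCrypt() {
            MongoCryptOptions options = MongoCryptOptions.builder()
                    .localKmsProviderOptions(MongoLocalKmsProviderOptions.builder()
                            .localMasterKey(ByteBuffer.wrap(new byte[96])) // placeholder key material
                            .build())
                    .build();
            return MongoCrypts.create(options); // the caller owns the instance and must close() it
        }
    }
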
+ * + * @since 1.5 + * @return true if bypassing query analysis + */ + public boolean isBypassQueryAnalysis() { + return bypassQueryAnalysis; + } + + /** + * The extraOptions that relate to the mongocryptd process or shared library. + * @return the extra options + * @since 1.5 + */ + public BsonDocument getExtraOptions() { + return extraOptions; + } + + /** + * Gets the search paths + * @return this + * @since 1.5 + */ + public List getSearchPaths() { + return searchPaths; + } + + /** + * The builder for the options + */ + public static final class Builder { + private MongoAwsKmsProviderOptions awsKmsProviderOptions; + private MongoLocalKmsProviderOptions localKmsProviderOptions; + private BsonDocument kmsProviderOptions = null; + private Map localSchemaMap = null; + private boolean needsKmsCredentialsStateEnabled; + private Map encryptedFieldsMap = null; + private boolean bypassQueryAnalysis; + private BsonDocument extraOptions = new BsonDocument(); + private List searchPaths = emptyList(); + + private Builder() { + } + + /** + * Sets the AWS KMS provider options. + * + * @param awsKmsProviderOptions the AWS KMS provider options + * @return this + */ + public Builder awsKmsProviderOptions(final MongoAwsKmsProviderOptions awsKmsProviderOptions) { + this.awsKmsProviderOptions = awsKmsProviderOptions; + return this; + } + + /** + * Sets the local KMS provider options. + * + * @param localKmsProviderOptions the local KMS provider options + * @return this + */ + public Builder localKmsProviderOptions(final MongoLocalKmsProviderOptions localKmsProviderOptions) { + this.localKmsProviderOptions = localKmsProviderOptions; + return this; + } + + /** + * Sets the KMS provider options. + * + * @param kmsProviderOptions the KMS provider options document + * @return this + * @since 1.1 + */ + public Builder kmsProviderOptions(final BsonDocument kmsProviderOptions) { + this.kmsProviderOptions = kmsProviderOptions; + return this; + } + + /** + * Sets the local schema map. + * + * @param localSchemaMap local schema map + * @return this + */ + public Builder localSchemaMap(final Map localSchemaMap) { + this.localSchemaMap = localSchemaMap; + return this; + } + + /** + * Sets whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled. Defaults to false + * + * @param needsKmsCredentialsStateEnabled whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled + * @return this + * @since 1.4 + */ + public Builder needsKmsCredentialsStateEnabled(final boolean needsKmsCredentialsStateEnabled) { + this.needsKmsCredentialsStateEnabled = needsKmsCredentialsStateEnabled; + return this; + } + + /** + * Sets the encrypted fields map. + * + * @param encryptedFieldsMap the encrypted fields map + * @since 1.5 + * @return this + */ + public Builder encryptedFieldsMap(final Map encryptedFieldsMap) { + this.encryptedFieldsMap = encryptedFieldsMap; + return this; + } + + /** + * Sets whether automatic analysis of outgoing commands should be disabled. + * + *
<p>Set bypassQueryAnalysis to true to use explicit encryption on indexed fields + * without the MongoDB Enterprise Advanced licensed crypt shared library.</p>
      + * + * @param bypassQueryAnalysis whether the analysis of outgoing commands should be disabled. + * @since 1.5 + * @return this + */ + public Builder bypassQueryAnalysis(final boolean bypassQueryAnalysis) { + this.bypassQueryAnalysis = bypassQueryAnalysis; + return this; + } + + /** + * The extraOptions that relate to the mongocryptd process or shared library. + * @param extraOptions the extraOptions + * @return this + * @since 1.5 + */ + public Builder extraOptions(final BsonDocument extraOptions) { + this.extraOptions = extraOptions; + return this; + } + + /** + * Sets search paths + * @param searchPaths sets search path + * @return this + * @since 1.5 + */ + public Builder searchPaths(final List searchPaths) { + this.searchPaths = searchPaths; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoCryptOptions build() { + return new MongoCryptOptions(this); + } + } + + private MongoCryptOptions(final Builder builder) { + isTrue("at least one KMS provider is configured", + builder.awsKmsProviderOptions != null || builder.localKmsProviderOptions != null + || builder.kmsProviderOptions != null); + this.awsKmsProviderOptions = builder.awsKmsProviderOptions; + this.localKmsProviderOptions = builder.localKmsProviderOptions; + this.kmsProviderOptions = builder.kmsProviderOptions; + this.localSchemaMap = builder.localSchemaMap; + this.needsKmsCredentialsStateEnabled = builder.needsKmsCredentialsStateEnabled; + this.encryptedFieldsMap = builder.encryptedFieldsMap; + this.bypassQueryAnalysis = builder.bypassQueryAnalysis; + this.extraOptions = builder.extraOptions; + this.searchPaths = builder.searchPaths; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java new file mode 100644 index 00000000000..683dcdf90f1 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +/** + * The entry point to the MongoCrypt library. + */ +public final class MongoCrypts { + + private MongoCrypts() { + //NOP + } + + /** + * Create a {@code MongoCrypt} instance. + * + *
<p> + * Make sure that JNA is able to find the shared library, most likely by setting the jna.library.path system property + * </p>
      + * + * @param options the options + * @return the instance + */ + public static MongoCrypt create(final MongoCryptOptions options) { + return new MongoCryptImpl(options); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java new file mode 100644 index 00000000000..27f62514aeb --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import org.bson.BsonDocument; + +import java.util.List; + +/** + * The options for creation of a data key + */ +public final class MongoDataKeyOptions { + private final List keyAltNames; + private final BsonDocument masterKey; + private final byte[] keyMaterial; + + /** + * Options builder + */ + public static final class Builder { + private List keyAltNames; + private BsonDocument masterKey; + private byte[] keyMaterial; + + /** + * Add alternate key names + * @param keyAltNames the alternate key names + * @return this + */ + public Builder keyAltNames(final List keyAltNames) { + this.keyAltNames = keyAltNames; + return this; + } + + /** + * Add the master key. + * + * @param masterKey the master key + * @return this + */ + public Builder masterKey(final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * Add the key material + * + * @param keyMaterial the optional custom key material for the data key + * @return this + * @since 1.5 + */ + public Builder keyMaterial(final byte[] keyMaterial) { + this.keyMaterial = keyMaterial; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoDataKeyOptions build() { + return new MongoDataKeyOptions(this); + } + } + + /** + * Create a builder for the options. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the alternate key names for the data key. + * + * @return the alternate key names + */ + public List getKeyAltNames() { + return keyAltNames; + } + + /** + * Gets the master key for the data key. + * + * @return the master key + */ + public BsonDocument getMasterKey() { + return masterKey; + } + + /** + * Gets the custom key material if set. 
+ * + * @return the custom key material for the data key or null + * @since 1.5 + */ + public byte[] getKeyMaterial() { + return keyMaterial; + } + + private MongoDataKeyOptions(final Builder builder) { + keyAltNames = builder.keyAltNames; + masterKey = builder.masterKey; + keyMaterial = builder.keyMaterial; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java new file mode 100644 index 00000000000..2dad2182e7d --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java @@ -0,0 +1,227 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import org.bson.BsonBinary; +import org.bson.BsonDocument; + +import java.util.Objects; + +/** + * Options for explicit encryption. + */ +public final class MongoExplicitEncryptOptions { + private final BsonBinary keyId; + private final String keyAltName; + private final String algorithm; + private final Long contentionFactor; + private final String queryType; + private final BsonDocument rangeOptions; + + /** + * The builder for the options + */ + public static final class Builder { + private BsonBinary keyId; + private String keyAltName; + private String algorithm; + private Long contentionFactor; + private String queryType; + private BsonDocument rangeOptions; + + private Builder() { + } + + /** + * Add the key identifier. + * + * @param keyId the key idenfifier + * @return this + */ + public Builder keyId(final BsonBinary keyId) { + this.keyId = keyId; + return this; + } + + /** + * Add the key alternative name. + * + * @param keyAltName the key alternative name + * @return this + */ + public Builder keyAltName(final String keyAltName) { + this.keyAltName = keyAltName; + return this; + } + + /** + * Add the encryption algorithm. + * + *
<p>To insert or query with an "Indexed" encrypted payload, use a MongoClient configured with {@code AutoEncryptionSettings}. + * {@code AutoEncryptionSettings.bypassQueryAnalysis} may be true. + * {@code AutoEncryptionSettings.bypassAutoEncryption} must be false.</p>
      + * + * @param algorithm the encryption algorithm + * @return this + */ + public Builder algorithm(final String algorithm) { + this.algorithm = algorithm; + return this; + } + + /** + * The contention factor. + * + *
<p>
      It is an error to set contentionFactor when algorithm is not "Indexed". + * @param contentionFactor the contention factor + * @return this + * @since 1.5 + */ + public Builder contentionFactor(final Long contentionFactor) { + this.contentionFactor = contentionFactor; + return this; + } + + /** + * The QueryType. + * + *
<p>It is an error to set queryType when algorithm is not "Indexed".</p>
      + * + * @param queryType the query type + * @return this + * @since 1.5 + */ + public Builder queryType(final String queryType) { + this.queryType = queryType; + return this; + } + + /** + * The Range Options. + * + *
<p>It is an error to set rangeOptions when the algorithm is not "range".</p>
      + * + * @param rangeOptions the range options + * @return this + * @since 1.7 + */ + public Builder rangeOptions(final BsonDocument rangeOptions) { + this.rangeOptions = rangeOptions; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoExplicitEncryptOptions build() { + return new MongoExplicitEncryptOptions(this); + } + } + + /** + * Create a builder for the options. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the key identifier + * @return the key identifier + */ + public BsonBinary getKeyId() { + return keyId; + } + + /** + * Gets the key alternative name + * @return the key alternative name + */ + public String getKeyAltName() { + return keyAltName; + } + + /** + * Gets the encryption algorithm + * @return the encryption algorithm + */ + public String getAlgorithm() { + return algorithm; + } + + /** + * Gets the contention factor + * @return the contention factor + * @since 1.5 + */ + public Long getContentionFactor() { + return contentionFactor; + } + + /** + * Gets the query type + * @return the query type + * @since 1.5 + */ + public String getQueryType() { + return queryType; + } + + /** + * Gets the range options + * @return the range options + * @since 1.7 + */ + public BsonDocument getRangeOptions() { + return rangeOptions; + } + + private MongoExplicitEncryptOptions(final Builder builder) { + this.keyId = builder.keyId; + this.keyAltName = builder.keyAltName; + this.algorithm = builder.algorithm; + this.contentionFactor = builder.contentionFactor; + this.queryType = builder.queryType; + this.rangeOptions = builder.rangeOptions; + if (!(Objects.equals(algorithm, "Indexed") || Objects.equals(algorithm, "Range"))) { + if (contentionFactor != null) { + throw new IllegalStateException( + "Invalid configuration, contentionFactor can only be set if algorithm is 'Indexed' or 'Range'"); + } else if (queryType != null) { + throw new IllegalStateException( + "Invalid configuration, queryType can only be set if algorithm is 'Indexed' or 'Range'"); + } + } + } + + @Override + public String toString() { + return "MongoExplicitEncryptOptions{" + + "keyId=" + keyId + + ", keyAltName='" + keyAltName + '\'' + + ", algorithm='" + algorithm + '\'' + + ", contentionFactor=" + contentionFactor + + ", queryType='" + queryType + '\'' + + ", rangeOptions=" + rangeOptions + + '}'; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java new file mode 100644 index 00000000000..43a724348d6 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import java.nio.ByteBuffer; + +/** + * An interface representing a key decryption operation using a key management service. 
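
A sketch of the round trip this interface expects from the caller (illustrative only, not part of the patch). TlsTransport is a hypothetical stand-in for the caller's own TLS connection handling, since this API performs no I/O itself.

    import com.mongodb.crypt.capi.MongoKeyDecryptor;

    import java.io.IOException;
    import java.nio.ByteBuffer;

    // Hypothetical transport: the caller connects to MongoKeyDecryptor#getHostName() over TLS.
    interface TlsTransport extends AutoCloseable {
        void write(ByteBuffer message) throws IOException;
        ByteBuffer read(int maxBytes) throws IOException;
        @Override
        void close() throws IOException;
    }

    final class KmsRoundTrip {
        static void complete(final MongoKeyDecryptor decryptor, final TlsTransport connection) throws IOException {
            connection.write(decryptor.getMessage());
            int bytesNeeded;
            while ((bytesNeeded = decryptor.bytesNeeded()) > 0) {
                decryptor.feed(connection.read(bytesNeeded));
            }
        }
    }
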
+ */ +public interface MongoKeyDecryptor { + + /** + * Gets the name of the KMS provider, e.g. "aws" or "kmip" + * + * @return the KMS provider name + */ + String getKmsProvider(); + + /** + * Gets the host name of the key management service. + * + * @return the host name + */ + String getHostName(); + + /** + * Gets the message to send to the key management service. + * + *
<p> + * Clients should call this method first, and send the message on a TLS connection to a configured KMS server. + * </p>
      + * + * @return the message to send + */ + ByteBuffer getMessage(); + + /** + * Gets the number of bytes that should be received from the KMS server. + * + *
<p> + * After sending the message to the KMS server, clients should call this method in a loop, receiving {@code bytesNeeded} from + * the KMS server and feeding those bytes to this decryptor, until {@code bytesNeeded} is 0. + * </p>
+ * + * @return the actual number of bytes that clients should be prepared to receive + */ + int bytesNeeded(); + + /** + * Feed the received bytes to the decryptor. + * + *
<p> + * After sending the message to the KMS server, clients should call this method in a loop, receiving the number of bytes indicated by + * a call to {@link #bytesNeeded()} from the KMS server and feeding those bytes to this decryptor, until {@link #bytesNeeded()} + * returns 0. + * </p>
      + * + * @param bytes the received bytes + */ + void feed(ByteBuffer bytes); +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java new file mode 100644 index 00000000000..cef14bf855f --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; +import com.sun.jna.ptr.PointerByReference; + +import java.nio.ByteBuffer; + +import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_new; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_bytes_needed; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_endpoint; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_feed; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_get_kms_provider; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_message; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_status; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.crypt.capi.CAPIHelper.toByteBuffer; +import static org.bson.assertions.Assertions.notNull; + +class MongoKeyDecryptorImpl implements MongoKeyDecryptor { + private final mongocrypt_kms_ctx_t wrapped; + + MongoKeyDecryptorImpl(final mongocrypt_kms_ctx_t wrapped) { + notNull("wrapped", wrapped); + this.wrapped = wrapped; + } + + @Override + public String getKmsProvider() { + return mongocrypt_kms_ctx_get_kms_provider(wrapped, null).toString(); + } + + @Override + public String getHostName() { + PointerByReference hostNamePointerByReference = new PointerByReference(); + boolean success = mongocrypt_kms_ctx_endpoint(wrapped, hostNamePointerByReference); + if (!success) { + throwExceptionFromStatus(); + } + Pointer hostNamePointer = hostNamePointerByReference.getValue(); + return hostNamePointer.getString(0); + } + + @Override + public ByteBuffer getMessage() { + mongocrypt_binary_t binary = mongocrypt_binary_new(); + + try { + boolean success = mongocrypt_kms_ctx_message(wrapped, binary); + if (!success) { + throwExceptionFromStatus(); + } + return toByteBuffer(binary); + } finally { + mongocrypt_binary_destroy(binary); + } + } + + @Override + public int bytesNeeded() { + return mongocrypt_kms_ctx_bytes_needed(wrapped); + } + + @Override + public void feed(final ByteBuffer bytes) { + try (BinaryHolder binaryHolder = toBinary(bytes)) { + boolean success = mongocrypt_kms_ctx_feed(wrapped, 
binaryHolder.getBinary()); + if (!success) { + throwExceptionFromStatus(); + } + } + } + + private void throwExceptionFromStatus() { + mongocrypt_status_t status = mongocrypt_status_new(); + mongocrypt_kms_ctx_status(wrapped, status); + MongoCryptException e = new MongoCryptException(status); + mongocrypt_status_destroy(status); + throw e; + } + +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java new file mode 100644 index 00000000000..be8eef09573 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + +import java.nio.ByteBuffer; + +import static org.bson.assertions.Assertions.notNull; + +/** + * The options for configuring a local KMS provider. + */ +public final class MongoLocalKmsProviderOptions { + + private final ByteBuffer localMasterKey; + + /** + * Construct a builder for the options + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the local master key + * + * @return the local master key + */ + public ByteBuffer getLocalMasterKey() { + return localMasterKey; + } + + /** + * The builder for the options + */ + public static final class Builder { + private ByteBuffer localMasterKey; + + private Builder() { + } + + /** + * Sets the local master key. + * + * @param localMasterKey the local master key + * @return this + */ + public Builder localMasterKey(final ByteBuffer localMasterKey) { + this.localMasterKey = localMasterKey; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoLocalKmsProviderOptions build() { + return new MongoLocalKmsProviderOptions(this); + } + } + + private MongoLocalKmsProviderOptions(final Builder builder) { + this.localMasterKey = notNull("Local KMS provider localMasterKey", builder.localMasterKey); + + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java new file mode 100644 index 00000000000..0bfc6defa63 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.crypt.capi; + +import org.bson.BsonDocument; + +/** + * The rewrap many data key options + * + *
<p> + * The masterKey document MUST have the fields corresponding to the given provider as specified in masterKey. + * </p>
      + * + * @since 1.5 + */ +public final class MongoRewrapManyDataKeyOptions { + + private final String provider; + private final BsonDocument masterKey; + + /** + * Options builder + */ + public static final class Builder { + private String provider; + private BsonDocument masterKey; + + /** + * The provider + * + * @param provider the provider + * @return this + */ + public Builder provider(final String provider) { + this.provider = provider; + return this; + } + + /** + * Add the master key. + * + * @param masterKey the master key + * @return this + */ + public Builder masterKey(final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoRewrapManyDataKeyOptions build() { + return new MongoRewrapManyDataKeyOptions(this); + } + } + + /** + * Create a builder for the options. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * @return the provider name + */ + public String getProvider() { + return provider; + } + + /** + * Gets the master key for the data key. + * + * @return the master key + */ + public BsonDocument getMasterKey() { + return masterKey; + } + + private MongoRewrapManyDataKeyOptions(final Builder builder) { + provider = builder.provider; + masterKey = builder.masterKey; + } +} + diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java new file mode 100644 index 00000000000..23064f8bf85 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java @@ -0,0 +1,110 @@ + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import org.slf4j.LoggerFactory; + +class SLF4JLogger implements Logger { + + private final org.slf4j.Logger delegate; + + SLF4JLogger(final String name) { + this.delegate = LoggerFactory.getLogger(name); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public boolean isTraceEnabled() { + return delegate.isTraceEnabled(); + } + + @Override + public void trace(final String msg) { + delegate.trace(msg); + } + + @Override + public void trace(final String msg, final Throwable t) { + delegate.trace(msg, t); + } + + @Override + public boolean isDebugEnabled() { + return delegate.isDebugEnabled(); + } + + @Override + public void debug(final String msg) { + delegate.debug(msg); + } + + @Override + public void debug(final String msg, final Throwable t) { + delegate.debug(msg, t); + } + + @Override + public boolean isInfoEnabled() { + return delegate.isInfoEnabled(); + } + + @Override + public void info(final String msg) { + delegate.info(msg); + } + + @Override + public void info(final String msg, final Throwable t) { + delegate.info(msg, t); + } + + @Override + public boolean isWarnEnabled() { + return delegate.isWarnEnabled(); + } + + @Override + public void warn(final String msg) { + delegate.warn(msg); + } + + @Override + public void warn(final String msg, final Throwable t) { + delegate.warn(msg, t); + } + + @Override + public boolean isErrorEnabled() { + return delegate.isErrorEnabled(); + } + + @Override + public void error(final String msg) { + delegate.error(msg); + } + + @Override + public void error(final String msg, final Throwable t) { + delegate.error(msg, t); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java new file mode 100644 index 00000000000..0a2a83c02f7 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.cstring; +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_random_fn; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import java.security.SecureRandom; + +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class SecureRandomCallback implements mongocrypt_random_fn { + private final SecureRandom secureRandom; + + SecureRandomCallback(final SecureRandom secureRandom) { + this.secureRandom = secureRandom; + } + + @Override + public boolean random(final Pointer ctx, final mongocrypt_binary_t out, final int count, final mongocrypt_status_t status) { + try { + byte[] randomBytes = new byte[count]; + secureRandom.nextBytes(randomBytes); + writeByteArrayToBinary(out, randomBytes); + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java new file mode 100644 index 00000000000..a5b7ac9f050 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.CAPI.cstring; +import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.crypt.capi.CAPI.mongocrypt_hmac_fn; +import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import java.security.InvalidKeyException; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.Signature; +import java.security.SignatureException; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.KeySpec; +import java.security.spec.PKCS8EncodedKeySpec; + +import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class SigningRSAESPKCSCallback implements mongocrypt_hmac_fn { + + private static final String KEY_ALGORITHM = "RSA"; + private static final String SIGN_ALGORITHM = "SHA256withRSA"; + + SigningRSAESPKCSCallback() { + } + + @Override + public boolean hmac(final Pointer ctx, final mongocrypt_binary_t key, final mongocrypt_binary_t in, + final mongocrypt_binary_t out, final mongocrypt_status_t status) { + try { + byte[] result = getSignature(toByteArray(key), toByteArray(in)); + writeByteArrayToBinary(out, result); + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } + + static byte[] getSignature(final byte[] privateKeyBytes, final byte[] dataToSign) throws NoSuchAlgorithmException, InvalidKeySpecException, InvalidKeyException, SignatureException { + KeySpec keySpec = new PKCS8EncodedKeySpec(privateKeyBytes); + KeyFactory keyFactory = KeyFactory.getInstance(KEY_ALGORITHM); + PrivateKey privateKey = keyFactory.generatePrivate(keySpec); + + Signature privateSignature = Signature.getInstance(SIGN_ALGORITHM); + privateSignature.initSign(privateKey); + privateSignature.update(dataToSign); + + return privateSignature.sign(); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java new file mode 100644 index 00000000000..c1c9060de33 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
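// A rough counterpart to the signing helper above: verifying such a signature with the matching
// RSA public key through the same JCA primitives. The X.509-encoded public key bytes are an
// assumption of this sketch; nothing in the driver performs this verification.
import java.security.KeyFactory;
import java.security.PublicKey;
import java.security.Signature;
import java.security.spec.X509EncodedKeySpec;

class SignatureVerificationSketch {
    static boolean verify(final byte[] x509PublicKeyBytes, final byte[] signedData, final byte[] signature)
            throws Exception {
        PublicKey publicKey = KeyFactory.getInstance("RSA")
                .generatePublic(new X509EncodedKeySpec(x509PublicKeyBytes));
        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(publicKey);
        verifier.update(signedData);
        return verifier.verify(signature);
    }
}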
+ * + */ + +/** + * The mongocrypt API package + */ +package com.mongodb.crypt.capi; diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json b/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json new file mode 100644 index 00000000000..44e398cb556 --- /dev/null +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json @@ -0,0 +1,180 @@ +[ +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_crypto_fn", + "methods":[{"name":"crypt","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hash_fn", + "methods":[{"name":"hash","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hmac_fn", + "methods":[{"name":"hmac","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_log_fn_t", + "methods":[{"name":"log","parameterTypes":["int","com.mongodb.crypt.capi.CAPI$cstring","int","com.sun.jna.Pointer"] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_random_fn", + "methods":[{"name":"random","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","int","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.sun.jna.Callback" +}, +{ + "name":"com.sun.jna.CallbackReference", + "methods":[{"name":"getCallback","parameterTypes":["java.lang.Class","com.sun.jna.Pointer","boolean"] }, {"name":"getFunctionPointer","parameterTypes":["com.sun.jna.Callback","boolean"] }, {"name":"getNativeString","parameterTypes":["java.lang.Object","boolean"] }, {"name":"initializeThread","parameterTypes":["com.sun.jna.Callback","com.sun.jna.CallbackReference$AttachOptions"] }] +}, +{ + "name":"com.sun.jna.CallbackReference$AttachOptions" +}, +{ + "name":"com.sun.jna.FromNativeConverter", + "methods":[{"name":"nativeType","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.IntegerType", + "fields":[{"name":"value"}] +}, +{ + "name":"com.sun.jna.JNIEnv" +}, +{ + "name":"com.sun.jna.Native", + "methods":[{"name":"dispose","parameterTypes":[] }, {"name":"fromNative","parameterTypes":["com.sun.jna.FromNativeConverter","java.lang.Object","java.lang.reflect.Method"] }, {"name":"fromNative","parameterTypes":["java.lang.Class","java.lang.Object"] }, {"name":"fromNative","parameterTypes":["java.lang.reflect.Method","java.lang.Object"] }, {"name":"nativeType","parameterTypes":["java.lang.Class"] }, {"name":"toNative","parameterTypes":["com.sun.jna.ToNativeConverter","java.lang.Object"] }] +}, +{ + "name":"com.sun.jna.Native$ffi_callback", + "methods":[{"name":"invoke","parameterTypes":["long","long","long"] }] +}, +{ + "name":"com.sun.jna.NativeMapped", + "methods":[{"name":"toNative","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.Pointer", + "fields":[{"name":"peer"}], + "methods":[{"name":"","parameterTypes":["long"] }] +}, +{ + 
"name":"com.sun.jna.PointerType", + "fields":[{"name":"pointer"}] +}, +{ + "name":"com.sun.jna.Structure", + "fields":[{"name":"memory"}, {"name":"typeInfo"}], + "methods":[{"name":"autoRead","parameterTypes":[] }, {"name":"autoWrite","parameterTypes":[] }, {"name":"getTypeInfo","parameterTypes":[] }, {"name":"newInstance","parameterTypes":["java.lang.Class","long"] }] +}, +{ + "name":"com.sun.jna.Structure$ByValue" +}, +{ + "name":"com.sun.jna.Structure$FFIType$FFITypes", + "fields":[{"name":"ffi_type_double"}, {"name":"ffi_type_float"}, {"name":"ffi_type_longdouble"}, {"name":"ffi_type_pointer"}, {"name":"ffi_type_sint16"}, {"name":"ffi_type_sint32"}, {"name":"ffi_type_sint64"}, {"name":"ffi_type_sint8"}, {"name":"ffi_type_uint16"}, {"name":"ffi_type_uint32"}, {"name":"ffi_type_uint64"}, {"name":"ffi_type_uint8"}, {"name":"ffi_type_void"}] +}, +{ + "name":"com.sun.jna.WString", + "methods":[{"name":"","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"java.lang.Boolean", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["boolean"] }, {"name":"getBoolean","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"java.lang.Byte", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["byte"] }] +}, +{ + "name":"java.lang.Character", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["char"] }] +}, +{ + "name":"java.lang.Class", + "methods":[{"name":"getComponentType","parameterTypes":[] }] +}, +{ + "name":"java.lang.Double", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["double"] }] +}, +{ + "name":"java.lang.Float", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["float"] }] +}, +{ + "name":"java.lang.Integer", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["int"] }] +}, +{ + "name":"java.lang.Long", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["long"] }] +}, +{ + "name":"java.lang.Object", + "methods":[{"name":"toString","parameterTypes":[] }] +}, +{ + "name":"java.lang.Short", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["short"] }] +}, +{ + "name":"java.lang.String", + "methods":[{"name":"","parameterTypes":["byte[]"] }, {"name":"","parameterTypes":["byte[]","java.lang.String"] }, {"name":"getBytes","parameterTypes":[] }, {"name":"getBytes","parameterTypes":["java.lang.String"] }, {"name":"lastIndexOf","parameterTypes":["int"] }, {"name":"substring","parameterTypes":["int"] }, {"name":"toCharArray","parameterTypes":[] }] +}, +{ + "name":"java.lang.System", + "methods":[{"name":"getProperty","parameterTypes":["java.lang.String"] }, {"name":"setProperty","parameterTypes":["java.lang.String","java.lang.String"] }] +}, +{ + "name":"java.lang.UnsatisfiedLinkError", + "methods":[{"name":"","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"java.lang.Void", + "fields":[{"name":"TYPE"}] +}, +{ + "name":"java.lang.reflect.Method", + "methods":[{"name":"getParameterTypes","parameterTypes":[] }, {"name":"getReturnType","parameterTypes":[] }] +}, +{ + "name":"java.nio.Buffer", + "methods":[{"name":"position","parameterTypes":[] }] +}, +{ + "name":"java.nio.ByteBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.CharBuffer", + "methods":[{"name":"array","parameterTypes":[] }, 
{"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.DoubleBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.FloatBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.IntBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.LongBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.ShortBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +} +] diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json b/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 00000000000..4187c0e8eab --- /dev/null +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,134 @@ +[ +{ + "name":"com.mongodb.crypt.capi.CAPI", + "allPublicFields":true, + "queryAllDeclaredMethods":true +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$cstring", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_crypto_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_ctx_t", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hash_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hmac_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_kms_ctx_t", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_log_fn_t", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_random_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_status_t", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_t", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.CallbackProxy", + "methods":[{"name":"callback","parameterTypes":["java.lang.Object[]"] }] +}, +{ + "name":"com.sun.jna.Pointer", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"com.sun.jna.Structure$FFIType", + "allDeclaredFields":true, + "queryAllPublicConstructors":true, + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}], + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.Structure$FFIType$size_t", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.ptr.PointerByReference", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}], + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"boolean", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + 
"name":"com.sun.crypto.provider.AESCipher$General", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.sun.crypto.provider.HmacCore$HmacSHA256", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.sun.crypto.provider.HmacCore$HmacSHA512", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"int", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"java.lang.Throwable", + "methods":[{"name":"addSuppressed","parameterTypes":["java.lang.Throwable"] }] +}, +{ + "name":"java.lang.reflect.Method", + "methods":[{"name":"isVarArgs","parameterTypes":[] }] +}, +{ + "name":"java.nio.Buffer" +}, +{ + "name":"long", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"sun.security.provider.NativePRNG", + "methods":[{"name":"","parameterTypes":[] }, {"name":"","parameterTypes":["java.security.SecureRandomParameters"] }] +}, +{ + "name":"sun.security.provider.SHA2$SHA256", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"sun.security.provider.SHA5$SHA512", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"void", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"org.slf4j.Logger" +} +] diff --git a/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java b/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java new file mode 100644 index 00000000000..87fbab2e82f --- /dev/null +++ b/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java @@ -0,0 +1,388 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.crypt.capi.MongoCryptContext.State; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.RawBsonDocument; +import org.junit.jupiter.api.Test; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +@SuppressWarnings("SameParameterValue") +public class MongoCryptTest { + @Test + public void testEncrypt() throws URISyntaxException, IOException { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + MongoCryptContext encryptor = mongoCrypt.createEncryptionContext("test", getResourceAsDocument("command.json")); + + assertEquals(State.NEED_MONGO_COLLINFO, encryptor.getState()); + + BsonDocument listCollectionsFilter = encryptor.getMongoOperation(); + assertEquals(getResourceAsDocument("list-collections-filter.json"), listCollectionsFilter); + + encryptor.addMongoOperationResult(getResourceAsDocument("collection-info.json")); + encryptor.completeMongoOperation(); + assertEquals(State.NEED_MONGO_MARKINGS, encryptor.getState()); + + BsonDocument jsonSchema = encryptor.getMongoOperation(); + assertEquals(getResourceAsDocument("mongocryptd-command.json"), jsonSchema); + + encryptor.addMongoOperationResult(getResourceAsDocument("mongocryptd-reply.json")); + encryptor.completeMongoOperation(); + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + + testKeyDecryptor(encryptor); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument encryptedDocument = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(getResourceAsDocument("encrypted-command.json"), encryptedDocument); + + encryptor.close(); + + mongoCrypt.close(); + } + + + @Test + public void testDecrypt() throws IOException, URISyntaxException { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + MongoCryptContext decryptor = mongoCrypt.createDecryptionContext(getResourceAsDocument("encrypted-command-reply.json")); + + assertEquals(State.NEED_MONGO_KEYS, decryptor.getState()); + + testKeyDecryptor(decryptor); + + assertEquals(State.READY, decryptor.getState()); + + RawBsonDocument decryptedDocument = decryptor.finish(); + assertEquals(State.DONE, decryptor.getState()); + assertEquals(getResourceAsDocument("command-reply.json"), decryptedDocument); + + decryptor.close(); + + mongoCrypt.close(); + } + + @Test + public void testEmptyAwsCredentials() throws URISyntaxException, IOException { + MongoCrypt mongoCrypt = MongoCrypts.create(MongoCryptOptions + .builder() + .kmsProviderOptions(new BsonDocument("aws", new BsonDocument())) + .needsKmsCredentialsStateEnabled(true) + .build()); + + MongoCryptContext decryptor = mongoCrypt.createDecryptionContext(getResourceAsDocument("encrypted-command-reply.json")); + + 
assertEquals(State.NEED_KMS_CREDENTIALS, decryptor.getState()); + + BsonDocument awsCredentials = new BsonDocument(); + awsCredentials.put("accessKeyId", new BsonString("example")); + awsCredentials.put("secretAccessKey", new BsonString("example")); + + decryptor.provideKmsProviderCredentials(new BsonDocument("aws", awsCredentials)); + + assertEquals(State.NEED_MONGO_KEYS, decryptor.getState()); + + mongoCrypt.close(); + } + + @Test + public void testMultipleCloseCalls() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + mongoCrypt.close(); + mongoCrypt.close(); + } + + @Test + public void testDataKeyCreation() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + List keyAltNames = Arrays.asList("first", "second"); + MongoCryptContext dataKeyContext = mongoCrypt.createDataKeyContext("local", + MongoDataKeyOptions.builder().masterKey(new BsonDocument()) + .keyAltNames(keyAltNames) + .build()); + assertEquals(State.READY, dataKeyContext.getState()); + + RawBsonDocument dataKeyDocument = dataKeyContext.finish(); + assertEquals(State.DONE, dataKeyContext.getState()); + assertNotNull(dataKeyDocument); + + List actualKeyAltNames = dataKeyDocument.getArray("keyAltNames").stream() + .map(bsonValue -> bsonValue.asString().getValue()) + .sorted() + .collect(Collectors.toList()); + assertIterableEquals(keyAltNames, actualKeyAltNames); + dataKeyContext.close(); + mongoCrypt.close(); + } + + @Test + public void testExplicitEncryptionDecryption() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument documentToEncrypt = new BsonDocument("v", new BsonString("hello")); + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("YWFhYWFhYWFhYWFhYWFhYQ=="))) + .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .build(); + MongoCryptContext encryptor = mongoCrypt.createExplicitEncryptionContext(documentToEncrypt, options); + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + + testKeyDecryptor(encryptor); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument encryptedDocument = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(getResourceAsDocument("encrypted-value.json"), encryptedDocument); + + MongoCryptContext decryptor = mongoCrypt.createExplicitDecryptionContext(encryptedDocument); + + assertEquals(State.READY, decryptor.getState()); + + RawBsonDocument decryptedDocument = decryptor.finish(); + assertEquals(State.DONE, decryptor.getState()); + assertEquals(documentToEncrypt, decryptedDocument); + + encryptor.close(); + + mongoCrypt.close(); + } + + + @Test + public void testExplicitExpressionEncryption() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument valueToEncrypt = getResourceAsDocument("fle2-find-range-explicit-v2/int32/value-to-encrypt.json"); + BsonDocument rangeOptions = getResourceAsDocument("fle2-find-range-explicit-v2/int32/rangeopts.json"); + BsonDocument expectedEncryptedPayload = getResourceAsDocument("fle2-find-range-explicit-v2/int32/encrypted-payload.json"); + + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("q83vqxI0mHYSNBI0VniQEg=="))) + .algorithm("Range") + .queryType("range") + .contentionFactor(4L) + .rangeOptions(rangeOptions) + .build(); + 
MongoCryptContext encryptor = mongoCrypt.createEncryptExpressionContext(valueToEncrypt, options); + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + + testKeyDecryptor(encryptor, "fle2-find-range-explicit-v2/int32/key-filter.json", "keys/ABCDEFAB123498761234123456789012-local-document.json"); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument actualEncryptedPayload = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(expectedEncryptedPayload, actualEncryptedPayload); + + encryptor.close(); + mongoCrypt.close(); + } + + @Test + public void testRangePreviewQueryTypeIsNotSupported() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument valueToEncrypt = getResourceAsDocument("fle2-find-range-explicit-v2/int32/value-to-encrypt.json"); + BsonDocument rangeOptions = getResourceAsDocument("fle2-find-range-explicit-v2/int32/rangeopts.json"); + + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("q83vqxI0mHYSNBI0VniQEg=="))) + .algorithm("Range") + .queryType("rangePreview") + .contentionFactor(4L) + .rangeOptions(rangeOptions) + .build(); + + MongoCryptException exp = assertThrows(MongoCryptException.class, () -> mongoCrypt.createEncryptExpressionContext(valueToEncrypt, options)); + assertEquals("Query type 'rangePreview' is deprecated, please use 'range'", exp.getMessage()); + mongoCrypt.close(); + } + + @Test + public void testRangePreviewAlgorithmIsNotSupported() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument rangeOptions = getResourceAsDocument("fle2-find-range-explicit-v2/int32/rangeopts.json"); + + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("q83vqxI0mHYSNBI0VniQEg=="))) + .algorithm("RangePreview") + .queryType("range") + .contentionFactor(4L) + .rangeOptions(rangeOptions) + .build()); + + assertEquals("Invalid configuration, contentionFactor can only be set if algorithm is 'Indexed' or 'Range'", + illegalStateException.getMessage()); + mongoCrypt.close(); + } + + @Test + public void testExplicitEncryptionDecryptionKeyAltName() throws IOException, URISyntaxException { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument documentToEncrypt = new BsonDocument("v", new BsonString("hello")); + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyAltName("altKeyName") + .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .build(); + MongoCryptContext encryptor = mongoCrypt.createExplicitEncryptionContext(documentToEncrypt, options); + + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + testKeyDecryptor(encryptor, "key-filter-keyAltName.json", "key-document.json"); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument encryptedDocument = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(getResourceAsDocument("encrypted-value.json"), encryptedDocument); + + MongoCryptContext decryptor = mongoCrypt.createExplicitDecryptionContext(encryptedDocument); + + assertEquals(State.READY, decryptor.getState()); + + RawBsonDocument decryptedDocument = decryptor.finish(); + assertEquals(State.DONE, decryptor.getState()); + 
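// The tests in this class walk MongoCryptContext through its states by hand. As a rough sketch
// only (assuming the caller supplies the results of the key-vault/mongocryptd queries and the
// KMS credentials, and leaving real KMS round trips via MongoKeyDecryptor out), a generic
// driver loop over the same API could look like this:
import com.mongodb.crypt.capi.MongoCryptContext;
import com.mongodb.crypt.capi.MongoCryptContext.State;
import org.bson.BsonDocument;
import org.bson.RawBsonDocument;

import java.util.function.Function;

class MongoCryptContextLoopSketch {
    static RawBsonDocument drive(final MongoCryptContext context,
                                 final Function<BsonDocument, BsonDocument> mongoOperationExecutor,
                                 final BsonDocument kmsCredentials) {
        while (true) {
            State state = context.getState();
            switch (state) {
                case NEED_MONGO_COLLINFO:
                case NEED_MONGO_MARKINGS:
                case NEED_MONGO_KEYS:
                    // Run the filter/command produced by the context and feed back the result.
                    BsonDocument result = mongoOperationExecutor.apply(context.getMongoOperation());
                    if (result != null) {
                        context.addMongoOperationResult(result);
                    }
                    context.completeMongoOperation();
                    break;
                case NEED_KMS_CREDENTIALS:
                    context.provideKmsProviderCredentials(kmsCredentials);
                    break;
                case NEED_KMS:
                    // Real KMS round trips (MongoKeyDecryptor.getMessage()/feed()) are out of scope here.
                    throw new UnsupportedOperationException("KMS round trips not covered by this sketch");
                case READY:
                    return context.finish();
                default:
                    throw new IllegalStateException("Unexpected state: " + state);
            }
        }
    }
}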
assertEquals(documentToEncrypt, decryptedDocument); + + encryptor.close(); + + mongoCrypt.close(); + } + + private void testKeyDecryptor(final MongoCryptContext context) { + testKeyDecryptor(context, "key-filter.json", "key-document.json"); + } + + private void testKeyDecryptor(final MongoCryptContext context, final String keyFilterPath, final String keyDocumentPath) { + BsonDocument keyFilter = context.getMongoOperation(); + assertEquals(getResourceAsDocument(keyFilterPath), keyFilter); + context.addMongoOperationResult(getResourceAsDocument(keyDocumentPath)); + context.completeMongoOperation(); + if (context.getState() == State.READY) { + return; + } + + assertEquals(State.NEED_KMS, context.getState()); + + MongoKeyDecryptor keyDecryptor = context.nextKeyDecryptor(); + assertEquals("aws", keyDecryptor.getKmsProvider()); + assertEquals("kms.us-east-1.amazonaws.com:443", keyDecryptor.getHostName()); + + ByteBuffer keyDecryptorMessage = keyDecryptor.getMessage(); + assertEquals(790, keyDecryptorMessage.remaining()); + + int bytesNeeded = keyDecryptor.bytesNeeded(); + assertEquals(1024, bytesNeeded); + + keyDecryptor.feed(getHttpResourceAsByteBuffer("kms-reply.txt")); + bytesNeeded = keyDecryptor.bytesNeeded(); + assertEquals(0, bytesNeeded); + + assertNull(context.nextKeyDecryptor()); + + context.completeKeyDecryptors(); + } + + private MongoCrypt createMongoCrypt() { + return MongoCrypts.create(MongoCryptOptions + .builder() + .awsKmsProviderOptions(MongoAwsKmsProviderOptions.builder() + .accessKeyId("example") + .secretAccessKey("example") + .build()) + .localKmsProviderOptions(MongoLocalKmsProviderOptions.builder() + .localMasterKey(ByteBuffer.wrap(new byte[96])) + .build()) + .build()); + } + + private static BsonDocument getResourceAsDocument(final String fileName) { + return BsonDocument.parse(getFileAsString(fileName, System.getProperty("line.separator"))); + } + + private static ByteBuffer getHttpResourceAsByteBuffer(final String fileName) { + return ByteBuffer.wrap(getFileAsString(fileName, "\r\n").getBytes(StandardCharsets.UTF_8)); + } + + private static String getFileAsString(final String fileName, final String lineSeparator) { + try { + URL resource = MongoCryptTest.class.getResource("/" + fileName); + if (resource == null) { + throw new RuntimeException("Could not find file " + fileName); + } + File file = new File(resource.toURI()); + StringBuilder stringBuilder = new StringBuilder(); + String line; + try (BufferedReader reader = new BufferedReader( + new InputStreamReader(Files.newInputStream(file.toPath()), StandardCharsets.UTF_8))) { + boolean first = true; + while ((line = reader.readLine()) != null) { + if (!first) { + stringBuilder.append(lineSeparator); + } + first = false; + stringBuilder.append(line); + } + } + return stringBuilder.toString(); + } catch (Throwable t) { + throw new RuntimeException("Could not parse file " + fileName, t); + } + } +} diff --git a/mongodb-crypt/src/test/resources/collection-info.json b/mongodb-crypt/src/test/resources/collection-info.json new file mode 100644 index 00000000000..3b9660938a3 --- /dev/null +++ b/mongodb-crypt/src/test/resources/collection-info.json @@ -0,0 +1,37 @@ +{ + "type": "collection", + "name": "test", + "idIndex": { + "ns": "test.test", + "name": "_id_", + "key": { + "_id": { + "$numberInt": "1" + } + }, + "v": { + "$numberInt": "2" + } + }, + "options": { + "validator": { + "$jsonSchema": { + "properties": { + "ssn": { + "encrypt": { + "keyId": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": 
"04" + } + }, + "type": "string", + "algorithm": "AEAD_AES_CBC_HMAC_SHA512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/command-reply.json b/mongodb-crypt/src/test/resources/command-reply.json new file mode 100644 index 00000000000..c110f737f45 --- /dev/null +++ b/mongodb-crypt/src/test/resources/command-reply.json @@ -0,0 +1,13 @@ +{ + "cursor": { + "firstBatch": [ + { + "_id": 1, + "ssn": "457-55-5462" + } + ], + "id": 0, + "ns": "test.test" + }, + "ok": 1 +} diff --git a/mongodb-crypt/src/test/resources/command.json b/mongodb-crypt/src/test/resources/command.json new file mode 100644 index 00000000000..d04bf7799ad --- /dev/null +++ b/mongodb-crypt/src/test/resources/command.json @@ -0,0 +1,6 @@ +{ + "find": "test", + "filter": { + "ssn": "457-55-5462" + } +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/encrypted-command-reply.json b/mongodb-crypt/src/test/resources/encrypted-command-reply.json new file mode 100644 index 00000000000..73d4d3427ee --- /dev/null +++ b/mongodb-crypt/src/test/resources/encrypted-command-reply.json @@ -0,0 +1,16 @@ +{ + "cursor" : { + "firstBatch" : [ + { + "_id": 1, + "ssn": { + "$binary": "AWFhYWFhYWFhYWFhYWFhYWECRTOW9yZzNDn5dGwuqsrJQNLtgMEKaujhs9aRWRp+7Yo3JK8N8jC8P0Xjll6C1CwLsE/iP5wjOMhVv1KMMyOCSCrHorXRsb2IKPtzl2lKTqQ=", + "$type": "06" + } + } + ], + "id" : 0, + "ns" : "test.test" + }, + "ok" : 1 +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/encrypted-command.json b/mongodb-crypt/src/test/resources/encrypted-command.json new file mode 100644 index 00000000000..8b8cfaa27ee --- /dev/null +++ b/mongodb-crypt/src/test/resources/encrypted-command.json @@ -0,0 +1,11 @@ +{ + "filter": { + "ssn": { + "$binary": { + "base64": "AWFhYWFhYWFhYWFhYWFhYWECRTOW9yZzNDn5dGwuqsrJQNLtgMEKaujhs9aRWRp+7Yo3JK8N8jC8P0Xjll6C1CwLsE/iP5wjOMhVv1KMMyOCSCrHorXRsb2IKPtzl2lKTqQ=", + "subType": "06" + } + } + }, + "find": "test" +} diff --git a/mongodb-crypt/src/test/resources/encrypted-value.json b/mongodb-crypt/src/test/resources/encrypted-value.json new file mode 100644 index 00000000000..e1a832b5ecb --- /dev/null +++ b/mongodb-crypt/src/test/resources/encrypted-value.json @@ -0,0 +1,6 @@ +{ + "v": { + "$binary": "AWFhYWFhYWFhYWFhYWFhYWECW+zDjR/69eS6VtuMD5+O2lZw6JyiWOw3avI7mnUkdpKzPfvy8F/nlZrgZa2cGmQsb0TmLZuk5trldosnGKD91w==", + "$type": "06" + } +} diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json new file mode 100644 index 00000000000..7db5540ca1b --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json @@ -0,0 +1,26 @@ +{ + "v": { + "$and": [ + { + "age": { + "$gte": { + "$binary": { + "base64": 
"DQECAAADcGF5bG9hZACZAQAABGcAhQEAAAMwAH0AAAAFZAAgAAAAAInd0noBhIiJMv8QTjcfgRqnnVhxRJRRACLfvgT+CTR/BXMAIAAAAADm0EjqF/T4EmR6Dw6NaPLrL0OuzS4AFvm90czFluAAygVsACAAAAAA5MXcYWjYlzhPFUDebBEa17B5z2bupmaW9uCdtLjc7RkAAzEAfQAAAAVkACAAAAAA7lkNtT6RLw91aJ07K/blwlFs5wi9pQjqUXDcaCTxe98FcwAgAAAAAPwySffuLQihmF70Ot93KtaUMNU8KpmA+niyPRcvarNMBWwAIAAAAACDv6fJXXwRqwZH3O2kO+hdeLZ36U6bMZSui8kv0PsPtAADMgB9AAAABWQAIAAAAACcMWVTbZC4ox5VdjWeYKLgf4oBjpPlbTTAkucm9JPK0wVzACAAAAAA3tIww4ZTytkxFsUKyJbc3zwQ2w7DhkOqaNvX9g8pi3gFbAAgAAAAAGs9XR3Q1JpxV+HPW8P2GvCuCBF5bGZ8Kl1zHqzZcd5/AAASY20ABAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAgAAABBzZWNvbmRPcGVyYXRvcgAEAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + { + "age": { + "$lte": { + "$binary": { + "base64": "DTsAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgACAAAAEHNlY29uZE9wZXJhdG9yAAQAAAAA", + "subType": "06" + } + } + } + } + ] + } +} diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json new file mode 100644 index 00000000000..897364761c7 --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json @@ -0,0 +1,19 @@ +{ + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": "q83vqxI0mHYSNBI0VniQEg==", + "$type": "04" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json new file mode 100644 index 00000000000..2e1407fe4e6 --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json @@ -0,0 +1,14 @@ +{ + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + }, + "sparsity": { + "$numberLong": "1" + }, + "trimFactor": { + "$numberInt": "1" + } +} diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json new file mode 100644 index 00000000000..4c294e887e6 --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json @@ -0,0 +1,20 @@ +{ + "v": { + "$and": [ + { + "age": { + "$gte": { + "$numberInt": "23" + } + } + }, + { + "age": { + "$lte": { + "$numberInt": "35" + } + } + } + ] + } +} diff --git a/mongodb-crypt/src/test/resources/json-schema.json b/mongodb-crypt/src/test/resources/json-schema.json new file mode 100644 index 00000000000..059373d9ca1 --- /dev/null +++ b/mongodb-crypt/src/test/resources/json-schema.json @@ -0,0 +1,15 @@ +{ + "properties": { + "ssn": { + "encrypt": { + "keyId": { + "$binary": "YWFhYWFhYWFhYWFhYWFhYQ==", + "$type": "04" + }, + "type": "string", + "algorithm": "AEAD_AES_CBC_HMAC_SHA512-Deterministic" + } + } + }, + "bsonType": "object" +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/key-document.json b/mongodb-crypt/src/test/resources/key-document.json new file mode 100644 index 00000000000..5414072596d --- /dev/null +++ b/mongodb-crypt/src/test/resources/key-document.json @@ -0,0 +1,36 @@ +{ + "status": { + "$numberInt": "1" + }, + "_id": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } + }, + "masterKey": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "provider": 
"aws" + }, + "updateDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyAltNames": [ + "altKeyName", + "another_altname" + ] +} diff --git a/mongodb-crypt/src/test/resources/key-filter-keyAltName.json b/mongodb-crypt/src/test/resources/key-filter-keyAltName.json new file mode 100644 index 00000000000..eb53a142a14 --- /dev/null +++ b/mongodb-crypt/src/test/resources/key-filter-keyAltName.json @@ -0,0 +1,14 @@ +{ + "$or": [ + { + "_id": { + "$in": [] + } + }, + { + "keyAltNames": { + "$in": ["altKeyName"] + } + } + ] +} diff --git a/mongodb-crypt/src/test/resources/key-filter.json b/mongodb-crypt/src/test/resources/key-filter.json new file mode 100644 index 00000000000..9ad7c70e5a7 --- /dev/null +++ b/mongodb-crypt/src/test/resources/key-filter.json @@ -0,0 +1,19 @@ +{ + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": "YWFhYWFhYWFhYWFhYWFhYQ==", + "$type": "04" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json b/mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json new file mode 100644 index 00000000000..e5d1a3f7661 --- /dev/null +++ b/mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "27OBvUqHAuYFy60nwCdvq2xmZ4kFzVySphXzBGq+HEot13comCoydEfnltBzLTuXLbV9cnREFJIO5f0jMqrlkxIuvAV8yO84p5VJTEa8j/xSNe7iA594rx7UeKT0fOt4VqM47fht8h+8PZYc5JVezvEMvwk115IBCwENxDjLtT0g+y8Hf+aTUEGtxrYToH8zf1/Y7S16mHiIc4jK3/vxHw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648915408923" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648915408923" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/mongodb-crypt/src/test/resources/kms-reply.txt b/mongodb-crypt/src/test/resources/kms-reply.txt new file mode 100644 index 00000000000..c2c52e38413 --- /dev/null +++ b/mongodb-crypt/src/test/resources/kms-reply.txt @@ -0,0 +1,6 @@ +HTTP/1.1 200 OK +x-amzn-RequestId: deeb35e5-4ecb-4bf1-9af5-84a54ff0af0e +Content-Type: application/x-amz-json-1.1 +Content-Length: 233 + +{"KeyId": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", "Plaintext": "TqhXy3tKckECjy4/ZNykMWG8amBF46isVPzeOgeusKrwheBmYaU8TMG5AHR/NeUDKukqo8hBGgogiQOVpLPkqBQHD8YkLsNbDmHoGOill5QAHnniF/Lz405bGucB5TfR"} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/list-collections-filter.json b/mongodb-crypt/src/test/resources/list-collections-filter.json new file mode 100644 index 00000000000..2f37dc5b093 --- /dev/null +++ b/mongodb-crypt/src/test/resources/list-collections-filter.json @@ -0,0 +1,3 @@ +{ + "name": "test" +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/mongocryptd-command.json 
b/mongodb-crypt/src/test/resources/mongocryptd-command.json new file mode 100644 index 00000000000..2ec0612d7e9 --- /dev/null +++ b/mongodb-crypt/src/test/resources/mongocryptd-command.json @@ -0,0 +1,22 @@ +{ + "find": "test", + "filter": { + "ssn": "457-55-5462" + }, + "jsonSchema": { + "properties": { + "ssn": { + "encrypt": { + "keyId": { + "$binary": "YWFhYWFhYWFhYWFhYWFhYQ==", + "$type": "04" + }, + "type": "string", + "algorithm": "AEAD_AES_CBC_HMAC_SHA512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "isRemoteSchema": true +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/mongocryptd-reply.json b/mongodb-crypt/src/test/resources/mongocryptd-reply.json new file mode 100644 index 00000000000..0d1873de7e2 --- /dev/null +++ b/mongodb-crypt/src/test/resources/mongocryptd-reply.json @@ -0,0 +1,18 @@ +{ + "schemaRequiresEncryption": true, + "ok": { + "$numberInt": "1" + }, + "result": { + "filter": { + "ssn": { + "$binary": { + "base64": "ADgAAAAQYQABAAAABWtpABAAAAAEYWFhYWFhYWFhYWFhYWFhYQJ2AAwAAAA0NTctNTUtNTQ2MgAA", + "subType": "06" + } + } + }, + "find": "test" + }, + "hasEncryptedPlaceholders": true +} \ No newline at end of file diff --git a/settings.gradle b/settings.gradle index ab252727079..b1c5e185d37 100644 --- a/settings.gradle +++ b/settings.gradle @@ -29,6 +29,7 @@ include ':driver-kotlin-sync' include ':driver-kotlin-coroutine' include ':bson-scala' include ':driver-scala' +include ':mongodb-crypt' include 'util:spock' include 'util:taglets' include ':graalvm-native-image-app' From eea937cd4efc33187f52c1133052af71c41e972a Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 23 Sep 2024 09:12:20 -0600 Subject: [PATCH 77/90] Provide GraalVM metadata and substitutions (#1357) JAVA-5219 JAVA-5408 --- .../bson => }/native-image.properties | 0 .../META-INF/native-image/reflect-config.json | 17 + build.gradle | 1 + driver-core/build.gradle | 1 + .../main/com/mongodb/UnixServerAddress.java | 10 + .../mongodb/internal/dns/JndiDnsClient.java | 5 +- .../UnixServerAddressSubstitution.java | 31 ++ .../bson => }/native-image.properties | 3 +- .../META-INF/native-image/reflect-config.json | 83 +++++ .../native-image/resource-config.json | 2 + graalvm-native-image-app/build.gradle | 27 +- graalvm-native-image-app/readme.md | 12 +- .../graalvm/CustomDnsClientProvider.java | 61 ++++ .../com/mongodb/internal/graalvm/DnsSpi.java | 35 +- .../internal/graalvm/NativeImageApp.java | 20 +- .../internal/graalvm/Substitutions.java | 45 +++ .../META-INF/native-image/jni-config.json | 180 +--------- .../META-INF/native-image/reflect-config.json | 316 +----------------- .../native-image/resource-config.json | 36 +- .../com.mongodb.spi.dns.DnsClientProvider | 1 + 20 files changed, 357 insertions(+), 529 deletions(-) rename bson/src/main/resources/META-INF/native-image/{org.mongodb/bson => }/native-image.properties (100%) create mode 100644 bson/src/main/resources/META-INF/native-image/reflect-config.json create mode 100644 driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java rename driver-core/src/main/resources/META-INF/native-image/{org.mongodb/bson => }/native-image.properties (87%) create mode 100644 driver-core/src/main/resources/META-INF/native-image/reflect-config.json create mode 100644 graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java create mode 100644 graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java create mode 100644 
graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider diff --git a/bson/src/main/resources/META-INF/native-image/org.mongodb/bson/native-image.properties b/bson/src/main/resources/META-INF/native-image/native-image.properties similarity index 100% rename from bson/src/main/resources/META-INF/native-image/org.mongodb/bson/native-image.properties rename to bson/src/main/resources/META-INF/native-image/native-image.properties diff --git a/bson/src/main/resources/META-INF/native-image/reflect-config.json b/bson/src/main/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 00000000000..dd27feda44d --- /dev/null +++ b/bson/src/main/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,17 @@ +[ +{ + "name":"java.lang.Object", + "queryAllDeclaredMethods":true +}, +{ + "name":"sun.security.provider.NativePRNG", + "methods":[{"name":"","parameterTypes":[] }, {"name":"","parameterTypes":["java.security.SecureRandomParameters"] }] +}, +{ + "name":"sun.security.provider.SHA", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"org.slf4j.Logger" +} +] diff --git a/build.gradle b/build.gradle index 543e6de19ce..d9ebd912fb8 100644 --- a/build.gradle +++ b/build.gradle @@ -58,6 +58,7 @@ ext { projectReactorVersion = '2022.0.0' junitBomVersion = '5.10.2' logbackVersion = '1.3.14' + graalSdkVersion = '24.0.0' gitVersion = getGitVersion() } diff --git a/driver-core/build.gradle b/driver-core/build.gradle index 78ab607cc23..72cd74104f5 100644 --- a/driver-core/build.gradle +++ b/driver-core/build.gradle @@ -46,6 +46,7 @@ dependencies { api "io.netty:netty-buffer", optional api "io.netty:netty-transport", optional api "io.netty:netty-handler", optional + compileOnly "org.graalvm.sdk:graal-sdk:$graalSdkVersion" // Optionally depend on both AWS SDK v2 and v1. The driver will use v2 is present, v1 if present, or built-in functionality if // neither are present diff --git a/driver-core/src/main/com/mongodb/UnixServerAddress.java b/driver-core/src/main/com/mongodb/UnixServerAddress.java index bba882de794..8bd42052004 100644 --- a/driver-core/src/main/com/mongodb/UnixServerAddress.java +++ b/driver-core/src/main/com/mongodb/UnixServerAddress.java @@ -17,12 +17,14 @@ package com.mongodb; import com.mongodb.annotations.Immutable; +import com.mongodb.internal.graalvm.substitution.UnixServerAddressSubstitution; import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; /** * Represents the location of a MongoD unix domain socket. + * It is {@linkplain UnixServerAddressSubstitution not supported in GraalVM native image}. * *

<p>Requires the 'jnr.unixsocket' library.</p>

      * @since 3.7 @@ -34,10 +36,18 @@ public final class UnixServerAddress extends ServerAddress { /** * Creates a new instance * @param path the path of the MongoD unix domain socket. + * @throws UnsupportedOperationException If {@linkplain UnixServerAddressSubstitution called in a GraalVM native image}. */ public UnixServerAddress(final String path) { super(notNull("The path cannot be null", path)); isTrueArgument("The path must end in .sock", path.endsWith(".sock")); + checkNotInGraalVmNativeImage(); + } + + /** + * @throws UnsupportedOperationException If {@linkplain UnixServerAddressSubstitution called in a GraalVM native image}. + */ + private static void checkNotInGraalVmNativeImage() { } @Override diff --git a/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java b/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java index a1e95cac68a..71df713fb8b 100644 --- a/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java +++ b/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java @@ -32,7 +32,10 @@ import java.util.Hashtable; import java.util.List; -final class JndiDnsClient implements DnsClient { +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

      + */ +public final class JndiDnsClient implements DnsClient { @Override public List getResourceRecordData(final String name, final String type) throws DnsException { diff --git a/driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java b/driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java new file mode 100644 index 00000000000..9d52c730c1b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.graalvm.substitution; + +import com.mongodb.UnixServerAddress; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; + +@TargetClass(UnixServerAddress.class) +public final class UnixServerAddressSubstitution { + @Substitute + private static void checkNotInGraalVmNativeImage() { + throw new UnsupportedOperationException("UnixServerAddress is not supported in GraalVM native image"); + } + + private UnixServerAddressSubstitution() { + } +} diff --git a/driver-core/src/main/resources/META-INF/native-image/org.mongodb/bson/native-image.properties b/driver-core/src/main/resources/META-INF/native-image/native-image.properties similarity index 87% rename from driver-core/src/main/resources/META-INF/native-image/org.mongodb/bson/native-image.properties rename to driver-core/src/main/resources/META-INF/native-image/native-image.properties index 74579722773..49541a06e0e 100644 --- a/driver-core/src/main/resources/META-INF/native-image/org.mongodb/bson/native-image.properties +++ b/driver-core/src/main/resources/META-INF/native-image/native-image.properties @@ -18,4 +18,5 @@ Args =\ com.mongodb.UnixServerAddress,\ com.mongodb.internal.connection.SnappyCompressor,\ com.mongodb.internal.connection.ClientMetadataHelper,\ - com.mongodb.internal.connection.ServerAddressHelper + com.mongodb.internal.connection.ServerAddressHelper,\ + com.mongodb.internal.dns.DefaultDnsResolver diff --git a/driver-core/src/main/resources/META-INF/native-image/reflect-config.json b/driver-core/src/main/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 00000000000..9a89dbe1e1f --- /dev/null +++ b/driver-core/src/main/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,83 @@ +[ +{ + "name":"com.mongodb.BasicDBObject", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.MongoNamespace", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true +}, +{ + "name":"com.mongodb.WriteConcern", + "allPublicFields":true +}, +{ + "name":"com.mongodb.client.model.changestream.ChangeStreamDocument", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + 
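// The now-public JndiDnsClient above can also be exercised directly through the DnsClient
// interface; a small sketch (the host name is a placeholder, and SRV/TXT are the record types
// the driver resolves for mongodb+srv connection strings):
import com.mongodb.internal.dns.JndiDnsClient;
import com.mongodb.spi.dns.DnsClient;
import com.mongodb.spi.dns.DnsException;

import java.util.List;

class DnsClientLookupSketch {
    static void lookup() throws DnsException {
        DnsClient dnsClient = new JndiDnsClient();
        List<String> srvRecords = dnsClient.getResourceRecordData("_mongodb._tcp.cluster0.example.com", "SRV");
        List<String> txtRecords = dnsClient.getResourceRecordData("cluster0.example.com", "TXT");
        System.out.println(srvRecords + " " + txtRecords);
    }
}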
"methods":[{"name":"","parameterTypes":["java.lang.String","org.bson.BsonDocument","org.bson.BsonDocument","org.bson.BsonDocument","java.lang.Object","java.lang.Object","org.bson.BsonDocument","org.bson.BsonTimestamp","com.mongodb.client.model.changestream.UpdateDescription","org.bson.BsonInt64","org.bson.BsonDocument","org.bson.BsonDateTime","com.mongodb.client.model.changestream.SplitEvent","org.bson.BsonDocument"] }] +}, +{ + "name":"com.mongodb.client.model.changestream.SplitEvent", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true +}, +{ + "name":"com.mongodb.client.model.changestream.TruncatedArray", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true +}, +{ + "name":"com.mongodb.client.model.changestream.UpdateDescription", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"","parameterTypes":["java.util.List","org.bson.BsonDocument","java.util.List","org.bson.BsonDocument"] }] +}, +{ + "name":"java.lang.Record" +}, +{ + "name":"java.lang.Thread", + "fields":[{"name":"threadLocalRandomProbe"}] +}, +{ + "name":"java.net.Socket", + "methods":[{"name":"setOption","parameterTypes":["java.net.SocketOption","java.lang.Object"] }] +}, +{ + "name":"java.security.SecureRandomParameters" +}, +{ + "name":"java.util.concurrent.ForkJoinTask", + "fields":[{"name":"aux"}, {"name":"status"}] +}, +{ + "name":"java.util.concurrent.atomic.Striped64", + "fields":[{"name":"base"}, {"name":"cellsBusy"}] +}, +{ + "name":"jdk.internal.misc.Unsafe" +}, +{ + "name":"jdk.net.ExtendedSocketOptions", + "fields":[{"name":"TCP_KEEPCOUNT"}, {"name":"TCP_KEEPIDLE"}, {"name":"TCP_KEEPINTERVAL"}] +}, +{ + "name":"org.bson.codecs.kotlin.DataClassCodecProvider" +}, +{ + "name":"org.bson.codecs.kotlinx.KotlinSerializerCodecProvider" +}, +{ + "name":"org.bson.codecs.record.RecordCodecProvider" +}, +{ + "name":"org.slf4j.Logger" +} +] diff --git a/driver-core/src/main/resources/META-INF/native-image/resource-config.json b/driver-core/src/main/resources/META-INF/native-image/resource-config.json index 8c008c9938d..43d3d5bb969 100644 --- a/driver-core/src/main/resources/META-INF/native-image/resource-config.json +++ b/driver-core/src/main/resources/META-INF/native-image/resource-config.json @@ -1,6 +1,8 @@ { "resources":{ "includes":[{ + "pattern":"\\QMETA-INF/services/com.mongodb.spi.dns.DnsClientProvider\\E" + }, { "pattern":"\\QMETA-INF/services/com.mongodb.spi.dns.InetAddressResolverProvider\\E" }]}, "bundles":[] diff --git a/graalvm-native-image-app/build.gradle b/graalvm-native-image-app/build.gradle index d6bc5a7b6cb..713b8c29a1a 100644 --- a/graalvm-native-image-app/build.gradle +++ b/graalvm-native-image-app/build.gradle @@ -40,9 +40,31 @@ graalvmNative { // The same is true about executing the `metadataCopy` Gradle task. // This may be a manifestation of an issue with the `org.graalvm.buildtools.native` plugin. 
enabled = false - defaultMode = 'standard' + defaultMode = 'direct' + def taskExecutedWithAgentAttached = 'run' + modes { + direct { + // see https://www.graalvm.org/latest/reference-manual/native-image/metadata/ExperimentalAgentOptions + options.add("config-output-dir=$buildDir/native/agent-output/$taskExecutedWithAgentAttached") + // `experimental-configuration-with-origins` produces + // `graalvm-native-image-app/build/native/agent-output/run/reflect-origins.txt` + // and similar files that explain the origin of each of the reachability metadata piece. + // However, for some reason, the actual reachability metadata is not generated when this option is enabled, + // so enable it manually if you need an explanation for a specific reachability metadata entry, + // and expect the build to fail. + // options.add('experimental-configuration-with-origins') + + // `experimental-class-define-support` does not seem to do what it is supposed to do. + // We need this option to work if we want to support `UnixServerAddress` in native image. + // Unfortunately, the tracing agent neither generates the metadata in + // `graalvm-native-image-app/src/main/resources/META-INF/native-image/proxy-config.json`, + // nor does it extract the bytecode of the generated classes to + // `graalvm-native-image-app/src/main/resources/META-INF/native-image/agent-extracted-predefined-classes`. + options.add('experimental-class-define-support') + } + } metadataCopy { - inputTaskNames.add('run') + inputTaskNames.add(taskExecutedWithAgentAttached) outputDirectories.add('src/main/resources/META-INF/native-image') mergeWithExisting = false } @@ -93,4 +115,5 @@ dependencies { implementation "ch.qos.logback:logback-classic:$logbackVersion" implementation platform("io.projectreactor:reactor-bom:$projectReactorVersion") implementation 'io.projectreactor:reactor-core' + implementation "org.graalvm.sdk:nativeimage:$graalSdkVersion" } diff --git a/graalvm-native-image-app/readme.md b/graalvm-native-image-app/readme.md index bb974fdd063..a659b7d1c07 100644 --- a/graalvm-native-image-app/readme.md +++ b/graalvm-native-image-app/readme.md @@ -47,12 +47,12 @@ you need to inform Gradle about that location as specified in https://docs.gradl Assuming that your MongoDB deployment is accessible at `mongodb://localhost:27017`, run from the driver project root directory: -| # | Command | Description | -|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| -| 0 | `env JAVA_HOME="${JDK17}" ./gradlew -PjavaVersion=21 :graalvm-native-image-app:nativeCompile` | Build the application relying on the reachability metadata stored in `graalvm-native-image-app/src/main/resources/META-INF/native-image`. | -| 1 | `env JAVA_HOME="${JDK17}" ./gradlew :graalvm-native-image-app:clean && env JAVA_HOME=${JDK21_GRAALVM} ./gradlew -PjavaVersion=21 -Pagent :graalvm-native-image-app:run && env JAVA_HOME=${JDK21_GRAALVM} ./gradlew :graalvm-native-image-app:metadataCopy` | Collect the reachability metadata and update the files storing it. Do this before building the application only if building fails otherwise. 
| -| 2 | `./graalvm-native-image-app/build/native/nativeCompile/NativeImageApp` | Run the application that has been built. | -| 3 | `env JAVA_HOME="${JDK17}" ./gradlew -PjavaVersion=21 :graalvm-native-image-app:nativeRun` | Run the application using Gradle, build it if necessary relying on the stored reachability metadata. | +| # | Command | Description | +|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| 0 | `env JAVA_HOME="${JDK17}" ./gradlew -PjavaVersion=21 :graalvm-native-image-app:nativeCompile` | Build the application relying on the reachability metadata stored in `graalvm-native-image-app/src/main/resources/META-INF/native-image`. | +| 1 | `env JAVA_HOME="${JDK17}" ./gradlew clean && env JAVA_HOME=${JDK21_GRAALVM} ./gradlew -PjavaVersion=21 -Pagent :graalvm-native-image-app:run && env JAVA_HOME=${JDK21_GRAALVM} ./gradlew :graalvm-native-image-app:metadataCopy` | Collect the reachability metadata and update the files storing it. Do this before building the application only if building fails otherwise. | +| 2 | `./graalvm-native-image-app/build/native/nativeCompile/NativeImageApp` | Run the application that has been built. | +| 3 | `env JAVA_HOME="${JDK17}" ./gradlew -PjavaVersion=21 :graalvm-native-image-app:nativeRun` | Run the application using Gradle, build it if necessary relying on the stored reachability metadata. | #### Specifying a custom connection string diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java new file mode 100644 index 00000000000..696d37becd0 --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.graalvm; + +import com.mongodb.internal.dns.JndiDnsClient; +import com.mongodb.spi.dns.DnsClient; +import com.mongodb.spi.dns.DnsClientProvider; +import com.mongodb.spi.dns.DnsException; + +import java.util.List; + +import static java.lang.String.format; + +public final class CustomDnsClientProvider implements DnsClientProvider { + private static volatile boolean used = false; + + public CustomDnsClientProvider() { + } + + @Override + public DnsClient create() { + return new CustomDnsClient(); + } + + static void assertUsed() throws AssertionError { + if (!used) { + throw new AssertionError(format("%s is not used", CustomDnsClientProvider.class.getSimpleName())); + } + } + + private static void markUsed() { + used = true; + } + + private static final class CustomDnsClient implements DnsClient { + private final JndiDnsClient wrapped; + + CustomDnsClient() { + wrapped = new JndiDnsClient(); + } + + @Override + public List getResourceRecordData(final String name, final String type) throws DnsException { + markUsed(); + return wrapped.getResourceRecordData(name, type); + } + } +} diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java index acfbb624629..e1d6ad72bfd 100644 --- a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java @@ -15,23 +15,52 @@ */ package com.mongodb.internal.graalvm; +import com.mongodb.MongoClientSettings; import com.mongodb.client.MongoClient; import com.mongodb.client.MongoClients; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.concurrent.TimeUnit; final class DnsSpi { private static final Logger LOGGER = LoggerFactory.getLogger(DnsSpi.class); public static void main(final String... args) { - LOGGER.info("Begin"); + useInetAddressResolverProvider(args); + useDnsClientProvider(); + } + + private static void useInetAddressResolverProvider(final String... args) { try (MongoClient client = args.length == 0 ? MongoClients.create() : MongoClients.create(args[0])) { - LOGGER.info("Database names: {}", client.listDatabaseNames().into(new ArrayList<>())); + ArrayList databaseNames = client.listDatabaseNames().into(new ArrayList<>()); + LOGGER.info("Database names: {}", databaseNames); } CustomInetAddressResolverProvider.assertUsed(); - LOGGER.info("End"); + } + + private static void useDnsClientProvider() { + try (MongoClient client = MongoClients.create(MongoClientSettings.builder() + .applyToClusterSettings(builder -> builder + .srvHost("a.b.c") + // `MongoClient` uses `CustomDnsClientProvider` asynchronously, + // and by waiting for server selection that cannot succeed due to `a.b.c` not resolving to an IP address, + // we give `MongoClient` enough time to use `CustomDnsClientProvider`. + // This is a tolerable race condition for a test. 
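+                        // Editor's note, not part of the original patch: the driver discovers `CustomDnsClientProvider`
+                        // through `java.util.ServiceLoader` via the `META-INF/services/com.mongodb.spi.dns.DnsClientProvider`
+                        // entry added later in this patch, so no explicit wiring in `MongoClientSettings` is needed for it
+                        // to be picked up here.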
+ .serverSelectionTimeout(2, TimeUnit.SECONDS)) + .build())) { + ArrayList databaseNames = client.listDatabaseNames().into(new ArrayList<>()); + LOGGER.info("Database names: {}", databaseNames); + } catch (RuntimeException e) { + try { + CustomDnsClientProvider.assertUsed(); + } catch (AssertionError err) { + err.addSuppressed(e); + throw err; + } + // an exception is expected because `a.b.c` does not resolve to an IP address + } } private DnsSpi() { diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java index 6c42ff21df3..59778d7686f 100644 --- a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java @@ -35,6 +35,8 @@ public static void main(final String[] args) { String[] arguments = new String[] {getConnectionStringSystemPropertyOrDefault()}; LOGGER.info("proper args={}, tour/example arguments={}", Arrays.toString(args), Arrays.toString(arguments)); List errors = Stream.of( + new ThrowingRunnable.Named(Substitutions.class, + () -> Substitutions.main(arguments)), new ThrowingRunnable.Named(DnsSpi.class, () -> DnsSpi.main(arguments)), new ThrowingRunnable.Named(gridfs.GridFSTour.class, @@ -109,11 +111,21 @@ default Throwable runAndCatch() { final class Named implements ThrowingRunnable { private final String name; - private final ThrowingRunnable runnable; + private final ThrowingRunnable loggingRunnable; Named(final String name, final ThrowingRunnable runnable) { this.name = name; - this.runnable = runnable; + this.loggingRunnable = () -> { + LOGGER.info("Begin {}", name); + try { + runnable.run(); + } catch (Exception | AssertionError e) { + LOGGER.info("Failure in {}", name, e); + throw e; + } finally { + LOGGER.info("End {}", name); + } + }; } Named(final Class mainClass, final ThrowingRunnable runnable) { @@ -122,13 +134,13 @@ final class Named implements ThrowingRunnable { @Override public void run() throws Exception { - runnable.run(); + loggingRunnable.run(); } @Override @Nullable public Throwable runAndCatch() { - Throwable t = runnable.runAndCatch(); + Throwable t = loggingRunnable.runAndCatch(); if (t != null) { t = new AssertionError(name, t); } diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java new file mode 100644 index 00000000000..e21d6e6d0bb --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.graalvm; + +import com.mongodb.UnixServerAddress; +import com.mongodb.internal.graalvm.substitution.UnixServerAddressSubstitution; + +import static com.mongodb.assertions.Assertions.fail; +import static org.graalvm.nativeimage.ImageInfo.inImageRuntimeCode; + +final class Substitutions { + public static void main(final String... args) { + assertUnixServerAddressSubstitution(); + } + + private static void assertUnixServerAddressSubstitution() { + try { + new UnixServerAddress("/tmp/mongodb-27017.sock"); + if (inImageRuntimeCode()) { + fail(String.format("%s was not applied", UnixServerAddressSubstitution.class)); + } + } catch (UnsupportedOperationException e) { + if (!inImageRuntimeCode()) { + throw e; + } + // expected in GraalVM + } + } + + private Substitutions() { + } +} diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json index c1dcb7f2ded..2be5d0ca308 100644 --- a/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json @@ -1,184 +1,6 @@ [ -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_crypto_fn", - "methods":[{"name":"crypt","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hash_fn", - "methods":[{"name":"hash","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hmac_fn", - "methods":[{"name":"hmac","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_log_fn_t", - "methods":[{"name":"log","parameterTypes":["int","com.mongodb.crypt.capi.CAPI$cstring","int","com.sun.jna.Pointer"] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_random_fn", - "methods":[{"name":"random","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","int","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] -}, { "name":"com.mongodb.internal.graalvm.NativeImageApp", "methods":[{"name":"main","parameterTypes":["java.lang.String[]"] }] -}, -{ - "name":"com.sun.jna.Callback" -}, -{ - "name":"com.sun.jna.CallbackReference", - "methods":[{"name":"getCallback","parameterTypes":["java.lang.Class","com.sun.jna.Pointer","boolean"] }, {"name":"getFunctionPointer","parameterTypes":["com.sun.jna.Callback","boolean"] }, {"name":"getNativeString","parameterTypes":["java.lang.Object","boolean"] }, {"name":"initializeThread","parameterTypes":["com.sun.jna.Callback","com.sun.jna.CallbackReference$AttachOptions"] }] -}, -{ - "name":"com.sun.jna.CallbackReference$AttachOptions" -}, -{ - "name":"com.sun.jna.FromNativeConverter", - "methods":[{"name":"nativeType","parameterTypes":[] }] -}, -{ - "name":"com.sun.jna.IntegerType", - "fields":[{"name":"value"}] -}, 
-{ - "name":"com.sun.jna.JNIEnv" -}, -{ - "name":"com.sun.jna.Native", - "methods":[{"name":"dispose","parameterTypes":[] }, {"name":"fromNative","parameterTypes":["com.sun.jna.FromNativeConverter","java.lang.Object","java.lang.reflect.Method"] }, {"name":"fromNative","parameterTypes":["java.lang.Class","java.lang.Object"] }, {"name":"fromNative","parameterTypes":["java.lang.reflect.Method","java.lang.Object"] }, {"name":"nativeType","parameterTypes":["java.lang.Class"] }, {"name":"toNative","parameterTypes":["com.sun.jna.ToNativeConverter","java.lang.Object"] }] -}, -{ - "name":"com.sun.jna.Native$ffi_callback", - "methods":[{"name":"invoke","parameterTypes":["long","long","long"] }] -}, -{ - "name":"com.sun.jna.NativeMapped", - "methods":[{"name":"toNative","parameterTypes":[] }] -}, -{ - "name":"com.sun.jna.Pointer", - "fields":[{"name":"peer"}], - "methods":[{"name":"","parameterTypes":["long"] }] -}, -{ - "name":"com.sun.jna.PointerType", - "fields":[{"name":"pointer"}] -}, -{ - "name":"com.sun.jna.Structure", - "fields":[{"name":"memory"}, {"name":"typeInfo"}], - "methods":[{"name":"autoRead","parameterTypes":[] }, {"name":"autoWrite","parameterTypes":[] }, {"name":"getTypeInfo","parameterTypes":[] }, {"name":"newInstance","parameterTypes":["java.lang.Class","long"] }] -}, -{ - "name":"com.sun.jna.Structure$ByValue" -}, -{ - "name":"com.sun.jna.Structure$FFIType$FFITypes", - "fields":[{"name":"ffi_type_double"}, {"name":"ffi_type_float"}, {"name":"ffi_type_longdouble"}, {"name":"ffi_type_pointer"}, {"name":"ffi_type_sint16"}, {"name":"ffi_type_sint32"}, {"name":"ffi_type_sint64"}, {"name":"ffi_type_sint8"}, {"name":"ffi_type_uint16"}, {"name":"ffi_type_uint32"}, {"name":"ffi_type_uint64"}, {"name":"ffi_type_uint8"}, {"name":"ffi_type_void"}] -}, -{ - "name":"com.sun.jna.WString", - "methods":[{"name":"","parameterTypes":["java.lang.String"] }] -}, -{ - "name":"java.lang.Boolean", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["boolean"] }, {"name":"getBoolean","parameterTypes":["java.lang.String"] }] -}, -{ - "name":"java.lang.Byte", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["byte"] }] -}, -{ - "name":"java.lang.Character", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["char"] }] -}, -{ - "name":"java.lang.Class", - "methods":[{"name":"getComponentType","parameterTypes":[] }] -}, -{ - "name":"java.lang.Double", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["double"] }] -}, -{ - "name":"java.lang.Float", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["float"] }] -}, -{ - "name":"java.lang.Integer", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["int"] }] -}, -{ - "name":"java.lang.Long", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["long"] }] -}, -{ - "name":"java.lang.Object", - "methods":[{"name":"toString","parameterTypes":[] }] -}, -{ - "name":"java.lang.Short", - "fields":[{"name":"TYPE"}, {"name":"value"}], - "methods":[{"name":"","parameterTypes":["short"] }] -}, -{ - "name":"java.lang.String", - "methods":[{"name":"","parameterTypes":["byte[]"] }, {"name":"","parameterTypes":["byte[]","java.lang.String"] }, {"name":"getBytes","parameterTypes":[] }, {"name":"getBytes","parameterTypes":["java.lang.String"] }, {"name":"lastIndexOf","parameterTypes":["int"] }, 
{"name":"substring","parameterTypes":["int"] }, {"name":"toCharArray","parameterTypes":[] }] -}, -{ - "name":"java.lang.System", - "methods":[{"name":"getProperty","parameterTypes":["java.lang.String"] }, {"name":"setProperty","parameterTypes":["java.lang.String","java.lang.String"] }] -}, -{ - "name":"java.lang.UnsatisfiedLinkError", - "methods":[{"name":"","parameterTypes":["java.lang.String"] }] -}, -{ - "name":"java.lang.Void", - "fields":[{"name":"TYPE"}] -}, -{ - "name":"java.lang.reflect.Method", - "methods":[{"name":"getParameterTypes","parameterTypes":[] }, {"name":"getReturnType","parameterTypes":[] }] -}, -{ - "name":"java.nio.Buffer", - "methods":[{"name":"position","parameterTypes":[] }] -}, -{ - "name":"java.nio.ByteBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] -}, -{ - "name":"java.nio.CharBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] -}, -{ - "name":"java.nio.DoubleBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] -}, -{ - "name":"java.nio.FloatBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] -}, -{ - "name":"java.nio.IntBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] -}, -{ - "name":"java.nio.LongBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] -}, -{ - "name":"java.nio.ShortBuffer", - "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] } -] \ No newline at end of file +] diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json index 29ded3e5f40..609320d4645 100644 --- a/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json @@ -1,8 +1,4 @@ [ -{ - "name":"boolean", - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] -}, { "name":"ch.qos.logback.classic.encoder.PatternLayoutEncoder", "queryAllPublicMethods":true, @@ -65,176 +61,19 @@ "name":"ch.qos.logback.core.spi.ContextAware", "methods":[{"name":"valueOf","parameterTypes":["java.lang.String"] }] }, -{ - "name":"com.mongodb.BasicDBObject", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.mongodb.MongoNamespace", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true -}, -{ - "name":"com.mongodb.WriteConcern", - "allPublicFields":true -}, -{ - "name":"com.mongodb.client.model.changestream.ChangeStreamDocument", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true, - "methods":[{"name":"","parameterTypes":["java.lang.String","org.bson.BsonDocument","org.bson.BsonDocument","org.bson.BsonDocument","java.lang.Object","java.lang.Object","org.bson.BsonDocument","org.bson.BsonTimestamp","com.mongodb.client.model.changestream.UpdateDescription","org.bson.BsonInt64","org.bson.BsonDocument","org.bson.BsonDateTime","com.mongodb.client.model.changestream.SplitEvent","org.bson.BsonDocument"] }] -}, -{ - "name":"com.mongodb.client.model.changestream.SplitEvent", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - 
"queryAllDeclaredConstructors":true -}, -{ - "name":"com.mongodb.client.model.changestream.TruncatedArray", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true -}, -{ - "name":"com.mongodb.client.model.changestream.UpdateDescription", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true, - "methods":[{"name":"","parameterTypes":["java.util.List","org.bson.BsonDocument","java.util.List","org.bson.BsonDocument"] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI", - "allPublicFields":true, - "queryAllDeclaredMethods":true -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$cstring", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_crypto_fn", - "queryAllDeclaredMethods":true, - "queryAllPublicMethods":true -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_ctx_t", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hash_fn", - "queryAllDeclaredMethods":true, - "queryAllPublicMethods":true -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hmac_fn", - "queryAllDeclaredMethods":true, - "queryAllPublicMethods":true -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_kms_ctx_t", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_log_fn_t", - "queryAllDeclaredMethods":true, - "queryAllPublicMethods":true -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_random_fn", - "queryAllDeclaredMethods":true, - "queryAllPublicMethods":true -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_status_t", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_t", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.sun.crypto.provider.AESCipher$General", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.sun.crypto.provider.HmacCore$HmacSHA256", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.sun.crypto.provider.HmacCore$HmacSHA512", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.sun.jna.CallbackProxy", - "methods":[{"name":"callback","parameterTypes":["java.lang.Object[]"] }] -}, -{ - "name":"com.sun.jna.Pointer", - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] -}, -{ - "name":"com.sun.jna.Structure$FFIType", - "allDeclaredFields":true, - "queryAllPublicConstructors":true, - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}], - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.sun.jna.Structure$FFIType$size_t", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"com.sun.jna.ptr.PointerByReference", - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}], - "methods":[{"name":"","parameterTypes":[] }] -}, { "name":"com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl", "methods":[{"name":"","parameterTypes":[] }] }, -{ - "name":"int", - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] -}, { "name":"java.io.FilePermission" }, -{ - "name":"java.lang.Object", - "queryAllDeclaredMethods":true -}, -{ - "name":"java.lang.Record" -}, { 
"name":"java.lang.RuntimePermission" }, -{ - "name":"java.lang.Thread", - "fields":[{"name":"threadLocalRandomProbe"}] -}, -{ - "name":"java.lang.Throwable", - "methods":[{"name":"addSuppressed","parameterTypes":["java.lang.Throwable"] }] -}, -{ - "name":"java.lang.reflect.Method", - "methods":[{"name":"isVarArgs","parameterTypes":[] }] -}, { "name":"java.net.NetPermission" }, -{ - "name":"java.net.Socket", - "methods":[{"name":"setOption","parameterTypes":["java.net.SocketOption","java.lang.Object"] }] -}, { "name":"java.net.SocketPermission" }, @@ -242,25 +81,15 @@ "name":"java.net.URLPermission", "methods":[{"name":"","parameterTypes":["java.lang.String","java.lang.String"] }] }, -{ - "name":"java.nio.Buffer" -}, { "name":"java.security.AllPermission" }, -{ - "name":"java.security.SecureRandomParameters" -}, { "name":"java.security.SecurityPermission" }, { "name":"java.util.PropertyPermission" }, -{ - "name":"java.util.concurrent.ForkJoinTask", - "fields":[{"name":"aux"}, {"name":"status"}] -}, { "name":"java.util.concurrent.atomic.AtomicBoolean", "fields":[{"name":"value"}] @@ -269,36 +98,9 @@ "name":"java.util.concurrent.atomic.AtomicReference", "fields":[{"name":"value"}] }, -{ - "name":"java.util.concurrent.atomic.Striped64", - "fields":[{"name":"base"}, {"name":"cellsBusy"}] -}, { "name":"javax.smartcardio.CardPermission" }, -{ - "name":"jdk.internal.misc.Unsafe" -}, -{ - "name":"jdk.net.ExtendedSocketOptions", - "fields":[{"name":"TCP_KEEPCOUNT"}, {"name":"TCP_KEEPIDLE"}, {"name":"TCP_KEEPINTERVAL"}] -}, -{ - "name":"long", - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] -}, -{ - "name":"org.bson.codecs.kotlin.DataClassCodecProvider" -}, -{ - "name":"org.bson.codecs.kotlinx.KotlinSerializerCodecProvider" -}, -{ - "name":"org.bson.codecs.record.RecordCodecProvider" -}, -{ - "name":"org.slf4j.Logger" -}, { "name":"reactivestreams.tour.Address", "allDeclaredFields":true, @@ -313,118 +115,6 @@ "queryAllDeclaredConstructors":true, "methods":[{"name":"","parameterTypes":[] }, {"name":"getAddress","parameterTypes":[] }, {"name":"getAge","parameterTypes":[] }, {"name":"getId","parameterTypes":[] }, {"name":"getName","parameterTypes":[] }, {"name":"setAddress","parameterTypes":["reactivestreams.tour.Address"] }, {"name":"setAge","parameterTypes":["int"] }, {"name":"setId","parameterTypes":["org.bson.types.ObjectId"] }, {"name":"setName","parameterTypes":["java.lang.String"] }] }, -{ - "name":"reactor.core.publisher.BaseSubscriber", - "fields":[{"name":"subscription"}] -}, -{ - "name":"reactor.core.publisher.FlatMapTracker", - "fields":[{"name":"size"}] -}, -{ - "name":"reactor.core.publisher.FluxConcatArray$ConcatArraySubscriber", - "fields":[{"name":"requested"}] -}, -{ - "name":"reactor.core.publisher.FluxCreate$BaseSink", - "fields":[{"name":"disposable"}, {"name":"requestConsumer"}, {"name":"requested"}] -}, -{ - "name":"reactor.core.publisher.FluxCreate$BufferAsyncSink", - "fields":[{"name":"wip"}] -}, -{ - "name":"reactor.core.publisher.FluxCreate$SerializedFluxSink", - "fields":[{"name":"error"}, {"name":"wip"}] -}, -{ - "name":"reactor.core.publisher.FluxDoFinally$DoFinallySubscriber", - "fields":[{"name":"once"}] -}, -{ - "name":"reactor.core.publisher.FluxFlatMap$FlatMapInner", - "fields":[{"name":"s"}] -}, -{ - "name":"reactor.core.publisher.FluxFlatMap$FlatMapMain", - "fields":[{"name":"error"}, {"name":"requested"}, {"name":"wip"}] -}, -{ - "name":"reactor.core.publisher.FluxIterable$IterableSubscription", - 
"fields":[{"name":"requested"}] -}, -{ - "name":"reactor.core.publisher.LambdaMonoSubscriber", - "fields":[{"name":"subscription"}] -}, -{ - "name":"reactor.core.publisher.LambdaSubscriber", - "fields":[{"name":"subscription"}] -}, -{ - "name":"reactor.core.publisher.MonoCallable$MonoCallableSubscription", - "fields":[{"name":"requestedOnce"}] -}, -{ - "name":"reactor.core.publisher.MonoCreate$DefaultMonoSink", - "fields":[{"name":"disposable"}, {"name":"requestConsumer"}, {"name":"state"}] -}, -{ - "name":"reactor.core.publisher.MonoFlatMap$FlatMapMain", - "fields":[{"name":"second"}] -}, -{ - "name":"reactor.core.publisher.MonoFlatMapMany$FlatMapManyMain", - "fields":[{"name":"inner"}, {"name":"requested"}] -}, -{ - "name":"reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain", - "fields":[{"name":"state"}] -}, -{ - "name":"reactor.core.publisher.MonoNext$NextSubscriber", - "fields":[{"name":"wip"}] -}, -{ - "name":"reactor.core.publisher.Operators$BaseFluxToMonoOperator", - "fields":[{"name":"state"}] -}, -{ - "name":"reactor.core.publisher.Operators$MultiSubscriptionSubscriber", - "fields":[{"name":"missedProduced"}, {"name":"missedRequested"}, {"name":"missedSubscription"}, {"name":"wip"}] -}, -{ - "name":"reactor.core.publisher.StrictSubscriber", - "fields":[{"name":"error"}, {"name":"requested"}, {"name":"s"}, {"name":"wip"}] -}, -{ - "name":"reactor.util.concurrent.MpscLinkedQueue", - "fields":[{"name":"consumerNode"}, {"name":"producerNode"}] -}, -{ - "name":"reactor.util.concurrent.MpscLinkedQueue$LinkedQueueNode", - "fields":[{"name":"next"}] -}, -{ - "name":"reactor.util.concurrent.SpscLinkedArrayQueue", - "fields":[{"name":"consumerIndex"}, {"name":"producerIndex"}] -}, -{ - "name":"sun.security.provider.NativePRNG", - "methods":[{"name":"","parameterTypes":[] }, {"name":"","parameterTypes":["java.security.SecureRandomParameters"] }] -}, -{ - "name":"sun.security.provider.SHA", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"sun.security.provider.SHA2$SHA256", - "methods":[{"name":"","parameterTypes":[] }] -}, -{ - "name":"sun.security.provider.SHA5$SHA512", - "methods":[{"name":"","parameterTypes":[] }] -}, { "name":"tour.Address", "allDeclaredFields":true, @@ -438,9 +128,5 @@ "queryAllDeclaredMethods":true, "queryAllDeclaredConstructors":true, "methods":[{"name":"","parameterTypes":[] }, {"name":"getAddress","parameterTypes":[] }, {"name":"getAge","parameterTypes":[] }, {"name":"getId","parameterTypes":[] }, {"name":"getName","parameterTypes":[] }, {"name":"setAddress","parameterTypes":["tour.Address"] }, {"name":"setAge","parameterTypes":["int"] }, {"name":"setId","parameterTypes":["org.bson.types.ObjectId"] }, {"name":"setName","parameterTypes":["java.lang.String"] }] -}, -{ - "name":"void", - "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] } -] \ No newline at end of file +] diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json index ece741c68e4..d4bf7ea12ff 100644 --- a/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json @@ -1,24 +1,6 @@ { "resources":{ "includes":[{ - "pattern":"\\QMETA-INF/services/ch.qos.logback.classic.spi.Configurator\\E" - }, { - "pattern":"\\QMETA-INF/services/java.lang.System$LoggerFinder\\E" - }, { - 
"pattern":"\\QMETA-INF/services/java.net.spi.InetAddressResolverProvider\\E" - }, { - "pattern":"\\QMETA-INF/services/java.net.spi.URLStreamHandlerProvider\\E" - }, { - "pattern":"\\QMETA-INF/services/java.nio.channels.spi.AsynchronousChannelProvider\\E" - }, { - "pattern":"\\QMETA-INF/services/java.nio.channels.spi.SelectorProvider\\E" - }, { - "pattern":"\\QMETA-INF/services/java.time.zone.ZoneRulesProvider\\E" - }, { - "pattern":"\\QMETA-INF/services/javax.xml.parsers.SAXParserFactory\\E" - }, { - "pattern":"\\QMETA-INF/services/org.slf4j.spi.SLF4JServiceProvider\\E" - }, { "pattern":"\\Qcom/sun/jna/darwin-aarch64/libjnidispatch.jnilib\\E" }, { "pattern":"\\Qcom/sun/jna/darwin-x86-64/libjnidispatch.jnilib\\E" @@ -44,6 +26,24 @@ "pattern":"\\Qlinux-x86-64/libmongocrypt.so\\E" }, { "pattern":"\\Qwin32-x86-64/mongocrypt.dll\\E" + }, { + "pattern":"\\QMETA-INF/services/ch.qos.logback.classic.spi.Configurator\\E" + }, { + "pattern":"\\QMETA-INF/services/java.lang.System$LoggerFinder\\E" + }, { + "pattern":"\\QMETA-INF/services/java.net.spi.InetAddressResolverProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.net.spi.URLStreamHandlerProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.nio.channels.spi.AsynchronousChannelProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.nio.channels.spi.SelectorProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.time.zone.ZoneRulesProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/javax.xml.parsers.SAXParserFactory\\E" + }, { + "pattern":"\\QMETA-INF/services/org.slf4j.spi.SLF4JServiceProvider\\E" }, { "pattern":"\\Qlogback-test.scmo\\E" }, { diff --git a/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider b/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider new file mode 100644 index 00000000000..4b53a569c91 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider @@ -0,0 +1 @@ +com.mongodb.internal.graalvm.CustomDnsClientProvider From e89ada54e1bf8d1f8033bd00a7be2801e277e880 Mon Sep 17 00:00:00 2001 From: Viacheslav Babanin Date: Mon, 23 Sep 2024 10:52:53 -0700 Subject: [PATCH 78/90] Set maxTimeMS explicitly for commands being explained (#1497) JAVA-5580 --- .../com/mongodb/internal/TimeoutContext.java | 14 +++- .../internal/connection/CommandHelper.java | 16 ++++ .../operation/AggregateOperation.java | 8 +- .../internal/operation/FindOperation.java | 10 ++- .../MapReduceToCollectionOperation.java | 9 ++- .../MapReduceWithInlineResultsOperation.java | 10 ++- .../internal/operation/OperationHelper.java | 2 +- .../mongodb/client/AbstractExplainTest.java | 75 ++++++++++++++++++- 8 files changed, 129 insertions(+), 15 deletions(-) diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutContext.java b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java index 0b4907c2ff1..93df2a09922 100644 --- a/driver-core/src/main/com/mongodb/internal/TimeoutContext.java +++ b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java @@ -17,6 +17,7 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.connection.CommandMessage; import com.mongodb.internal.time.StartTime; import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; @@ -213,14 +214,23 @@ public void resetToDefaultMaxTime() { /** * The override will be provided as the remaining value in - * {@link #runMaxTimeMS}, where 0 
is ignored. + * {@link #runMaxTimeMS}, where 0 is ignored. This is useful for setting the timeout + * in {@link CommandMessage} as an extra element before the command is sent to the server. + *

      * NOTE: Suitable for static user-defined values only (i.e MaxAwaitTimeMS), - * not for running timeouts that adjust dynamically. + * not for running timeouts that adjust dynamically (CSOT). */ public void setMaxTimeOverride(final long maxTimeMS) { this.maxTimeSupplier = () -> maxTimeMS; } + /** + * Disable the maxTimeMS override. This way the maxTimeMS will not + * be appended to the command in the {@link CommandMessage}. + */ + public void disableMaxTimeOverride() { + this.maxTimeSupplier = () -> 0; + } /** * The override will be provided as the remaining value in diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java index 11dfd94e935..fa7c1f0739d 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java @@ -20,10 +20,12 @@ import com.mongodb.MongoServerException; import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; +import org.bson.BsonInt64; import org.bson.BsonValue; import org.bson.codecs.BsonDocumentCodec; @@ -117,6 +119,20 @@ private static CommandMessage getCommandMessage(final String database, final Bso clusterConnectionMode, serverApi); } + + /** + * Appends a user-defined maxTimeMS to the command if CSOT is not enabled. + * This is necessary when maxTimeMS must be explicitly set on the command being explained, + * rather than appending it lazily to the explain command in the {@link CommandMessage} via {@link TimeoutContext#setMaxTimeOverride(long)}. + * This ensures backwards compatibility with pre-CSOT behavior. 
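+     * <p>
+     * Editor's illustration, not part of the original patch ("coll" is a placeholder collection name):
+     * explaining a find built with maxTime(500, TimeUnit.MILLISECONDS) and no client-wide timeoutMS is
+     * expected to send {"explain": {"find": "coll", "maxTimeMS": 500}, ...} rather than putting maxTimeMS
+     * on the outer explain command, which is what the assertions added to AbstractExplainTest below verify.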
+ */ + public static void applyMaxTimeMS(final TimeoutContext timeoutContext, final BsonDocument command) { + if (!timeoutContext.hasTimeoutMS()) { + command.append("maxTimeMS", new BsonInt64(timeoutContext.getTimeoutSettings().getMaxTimeMS())); + timeoutContext.disableMaxTimeOverride(); + } + } + private CommandHelper() { } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java index 07943560b40..f50304480b5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java @@ -32,6 +32,7 @@ import java.util.List; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; @@ -155,8 +156,11 @@ public AsyncReadOperation asAsyncExplainableOperation(@Nullable final Exp CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { return new CommandReadOperation<>(getNamespace().getDatabaseName(), - (operationContext, serverDescription, connectionDescription) -> - asExplainCommand(wrapped.getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder); + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = wrapped.getCommand(operationContext, MIN_WIRE_VERSION); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, verbosity); + }, resultDecoder); } MongoNamespace getNamespace() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java index 514e48b4db8..abdbc328a14 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java @@ -42,6 +42,7 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.createReadCommandAndExecuteAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; @@ -364,8 +365,11 @@ public AsyncReadOperation asAsyncExplainableOperation(@Nullable final Exp CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { return new CommandReadOperation<>(getNamespace().getDatabaseName(), - (operationContext, serverDescription, connectionDescription) -> - asExplainCommand(getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder); + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = getCommand(operationContext, MIN_WIRE_VERSION); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, verbosity); + }, resultDecoder); } private BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) { @@ -397,7 +401,7 @@ private BsonDocument getCommand(final OperationContext operationContext, final 
i if (isAwaitData()) { commandDocument.put("awaitData", BsonBoolean.TRUE); } else { - operationContext.getTimeoutContext().setMaxTimeOverride(0L); + operationContext.getTimeoutContext().disableMaxTimeOverride(); } } else { setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java index b93be56d6f2..327aa5e5fa7 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java @@ -35,6 +35,7 @@ import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandWriteTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; @@ -243,9 +244,11 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { return new CommandReadOperation<>(getNamespace().getDatabaseName(), - (operationContext, serverDescription, connectionDescription) -> - asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription), - explainVerbosity), new BsonDocumentCodec()); + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = getCommandCreator().create(operationContext, serverDescription, connectionDescription); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, explainVerbosity); + }, new BsonDocumentCodec()); } private CommandWriteTransformer transformer(final TimeoutContext timeoutContext) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java index 695053e8845..273d8595ec8 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java @@ -32,6 +32,7 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; @@ -188,9 +189,12 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { return new CommandReadOperation<>(namespace.getDatabaseName(), - (operationContext, serverDescription, connectionDescription) -> - asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription), - explainVerbosity), new BsonDocumentCodec()); + (operationContext, serverDescription, 
connectionDescription) -> { + BsonDocument command = getCommandCreator().create(operationContext, serverDescription, connectionDescription); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, + explainVerbosity); + }, new BsonDocumentCodec()); } private CommandReadTransformer> transformer() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java index ac69f8742c7..04318635a06 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java @@ -198,7 +198,7 @@ static boolean canRetryRead(final ServerDescription serverDescription, final Ope static void setNonTailableCursorMaxTimeSupplier(final TimeoutMode timeoutMode, final OperationContext operationContext) { if (timeoutMode == TimeoutMode.ITERATION) { - operationContext.getTimeoutContext().setMaxTimeOverride(0L); + operationContext.getTimeoutContext().disableMaxTimeOverride(); } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java index 7db4a079a5e..d9df697b3ed 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java @@ -18,8 +18,11 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; import com.mongodb.client.model.Aggregates; import com.mongodb.client.model.Filters; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonDocument; import org.bson.BsonInt32; import org.bson.Document; @@ -27,10 +30,13 @@ import org.junit.Before; import org.junit.Test; +import java.util.concurrent.TimeUnit; + import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.ClusterFixture.serverVersionLessThan; import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static java.util.Collections.singletonList; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -39,17 +45,20 @@ public abstract class AbstractExplainTest { private MongoClient client; + private TestCommandListener commandListener; protected abstract MongoClient createMongoClient(MongoClientSettings settings); @Before public void setUp() { - client = createMongoClient(Fixture.getMongoClientSettings()); + commandListener = new TestCommandListener(); + client = createMongoClient(Fixture.getMongoClientSettingsBuilder().addCommandListener(commandListener).build()); } @After public void tearDown() { client.close(); + commandListener.reset(); } @Test @@ -83,6 +92,60 @@ public void testExplainOfFind() { assertFalse(explainBsonDocument.containsKey("executionStats")); } + @Test + public void testFindContainsMaxTimeMsInExplain() { + //given + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("explainTest", BsonDocument.class); + + FindIterable iterable = collection.find() + .maxTime(500, TimeUnit.MILLISECONDS); + + //when + iterable.explain(); + + //then + assertExplainableCommandContainMaxTimeMS(); + } + + @Test + public void testAggregateContainsMaxTimeMsInExplain() { + //given + 
MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("explainTest", BsonDocument.class); + + AggregateIterable iterable = collection.aggregate( + singletonList(Aggregates.match(Filters.eq("_id", 1)))) + .maxTime(500, TimeUnit.MILLISECONDS); + + //when + iterable.explain(); + + //then + assertExplainableCommandContainMaxTimeMS(); + } + + @Test + public void testListSearchIndexesContainsMaxTimeMsInExplain() { + //given + assumeTrue(serverVersionAtLeast(6, 0)); + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("explainTest", BsonDocument.class); + + ListSearchIndexesIterable iterable = collection.listSearchIndexes() + .maxTime(500, TimeUnit.MILLISECONDS); + + //when + try { + iterable.explain(); + } catch (MongoCommandException throwable) { + //we expect listSearchIndexes is only supported in Atlas Search in some deployments. + } + + //then + assertExplainableCommandContainMaxTimeMS(); + } + @Test public void testExplainOfAggregateWithNewResponseStructure() { // Aggregate explain is supported on earlier versions, but the structure of the response on which we're asserting in this test @@ -167,4 +230,14 @@ public void testExplainOfAggregateWithOldResponseStructure() { explainBsonDocument = iterable.explain(BsonDocument.class, ExplainVerbosity.QUERY_PLANNER); assertNotNull(explainBsonDocument); } + + private void assertExplainableCommandContainMaxTimeMS() { + assertEquals(1, commandListener.getCommandStartedEvents().size()); + CommandStartedEvent explain = commandListener.getCommandStartedEvent("explain"); + BsonDocument explainCommand = explain.getCommand(); + BsonDocument explainableCommand = explainCommand.getDocument("explain"); + + assertFalse(explainCommand.containsKey("maxTimeMS")); + assertTrue(explainableCommand.containsKey("maxTimeMS")); + } } From 2932092fd5f469b5207228c0d0d0be5e2fe946dc Mon Sep 17 00:00:00 2001 From: Jeff Yemin Date: Mon, 23 Sep 2024 13:59:48 -0400 Subject: [PATCH 79/90] Move mongodb-crypt classes to internal package (#1500) * Move all but one of the classes in mongodb-crypt module to an internal package * The MongoCryptException class stays where it is because it is exposed in the API as a chained exception. 
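Editor's aside, not part of the commit message: a minimal sketch of why MongoCryptException must stay in the public com.mongodb.crypt.capi package while everything else moves to an internal one. Application code may unwrap it as the chained cause of a driver exception; the class, method, and variable names below are hypothetical placeholders, and the exact wrapping exception type may differ by code path.

```java
import com.mongodb.MongoClientException;
import com.mongodb.client.MongoCollection;
import com.mongodb.crypt.capi.MongoCryptException;
import org.bson.Document;

final class CryptErrorHandlingSketch {
    // Hypothetical helper: insert into a collection assumed to be configured for client-side
    // encryption and report a chained libmongocrypt failure. Only MongoCryptException is
    // referenced from the public package; classes under com.mongodb.internal.crypt.capi are
    // driver implementation details and are never imported by application code.
    static void insertOrReport(final MongoCollection<Document> collection, final Document doc) {
        try {
            collection.insertOne(doc);
        } catch (MongoClientException e) {
            if (e.getCause() instanceof MongoCryptException) {
                System.err.println("Encryption library failure: " + e.getCause().getMessage());
            }
            throw e;
        }
    }
}
```

The pattern of inspecting getCause() still applies even if the wrapping exception differs in a given driver path.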
JAVA-5584 --- config/spotbugs/exclude.xml | 2 +- .../framework/MongoCryptBenchmarkRunner.java | 14 +-- .../internal/capi/MongoCryptHelper.java | 2 +- .../client/vault/EncryptOptionsHelper.java | 2 +- .../internal/capi/MongoCryptHelperTest.java | 2 +- .../client/internal/crypt/CommandMarker.java | 2 +- .../client/internal/crypt/Crypt.java | 12 +- .../client/internal/crypt/Crypts.java | 4 +- .../internal/crypt/KeyManagementService.java | 2 +- .../client/internal/CommandMarker.java | 2 +- .../com/mongodb/client/internal/Crypt.java | 12 +- .../com/mongodb/client/internal/Crypts.java | 4 +- .../crypt/capi/MongoCryptException.java | 29 ++--- .../crypt/capi/BinaryHolder.java | 6 +- .../{ => internal}/crypt/capi/CAPI.java | 2 +- .../{ => internal}/crypt/capi/CAPIHelper.java | 6 +- .../crypt/capi/CipherCallback.java | 18 +-- .../crypt/capi/DisposableMemory.java | 2 +- .../{ => internal}/crypt/capi/JULLogger.java | 2 +- .../{ => internal}/crypt/capi/Logger.java | 2 +- .../{ => internal}/crypt/capi/Loggers.java | 2 +- .../crypt/capi/MacCallback.java | 18 +-- .../crypt/capi/MessageDigestCallback.java | 18 +-- .../capi/MongoAwsKmsProviderOptions.java | 2 +- .../{ => internal}/crypt/capi/MongoCrypt.java | 2 +- .../crypt/capi/MongoCryptContext.java | 3 +- .../crypt/capi/MongoCryptContextImpl.java | 48 ++++---- .../crypt/capi/MongoCryptImpl.java | 106 +++++++++--------- .../crypt/capi/MongoCryptOptions.java | 3 +- .../crypt/capi/MongoCrypts.java | 2 +- .../crypt/capi/MongoDataKeyOptions.java | 2 +- .../capi/MongoExplicitEncryptOptions.java | 2 +- .../crypt/capi/MongoKeyDecryptor.java | 2 +- .../crypt/capi/MongoKeyDecryptorImpl.java | 38 ++++--- .../capi/MongoLocalKmsProviderOptions.java | 2 +- .../capi/MongoRewrapManyDataKeyOptions.java | 2 +- .../crypt/capi/SLF4JLogger.java | 2 +- .../crypt/capi/SecureRandomCallback.java | 16 +-- .../crypt/capi/SigningRSAESPKCSCallback.java | 18 +-- .../internal/crypt/capi/package-info.java | 21 ++++ .../mongodb/crypt/capi/MongoCryptTest.java | 11 +- 41 files changed, 243 insertions(+), 204 deletions(-) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/BinaryHolder.java (87%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/CAPI.java (99%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/CAPIHelper.java (94%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/CipherCallback.java (83%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/DisposableMemory.java (95%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/JULLogger.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/Logger.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/Loggers.java (96%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MacCallback.java (73%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MessageDigestCallback.java (71%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoAwsKmsProviderOptions.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoCrypt.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoCryptContext.java (97%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoCryptContextImpl.java (68%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoCryptImpl.java (79%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoCryptOptions.java (99%) 
rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoCrypts.java (96%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoDataKeyOptions.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoExplicitEncryptOptions.java (99%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoKeyDecryptor.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoKeyDecryptorImpl.java (63%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoLocalKmsProviderOptions.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/MongoRewrapManyDataKeyOptions.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/SLF4JLogger.java (98%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/SecureRandomCallback.java (72%) rename mongodb-crypt/src/main/com/mongodb/{ => internal}/crypt/capi/SigningRSAESPKCSCallback.java (80%) create mode 100644 mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml index 9ce5b944cb4..488c797a6a0 100644 --- a/config/spotbugs/exclude.xml +++ b/config/spotbugs/exclude.xml @@ -262,7 +262,7 @@ - + diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java index 33b6c0ad102..718ab9f21af 100644 --- a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java @@ -17,13 +17,13 @@ * */ -import com.mongodb.crypt.capi.CAPI; -import com.mongodb.crypt.capi.MongoCrypt; -import com.mongodb.crypt.capi.MongoCryptContext; -import com.mongodb.crypt.capi.MongoCryptOptions; -import com.mongodb.crypt.capi.MongoCrypts; -import com.mongodb.crypt.capi.MongoExplicitEncryptOptions; -import com.mongodb.crypt.capi.MongoLocalKmsProviderOptions; +import com.mongodb.internal.crypt.capi.CAPI; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; +import com.mongodb.internal.crypt.capi.MongoCrypts; +import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions; +import com.mongodb.internal.crypt.capi.MongoLocalKmsProviderOptions; import org.bson.BsonBinary; import org.bson.BsonBinarySubType; import org.bson.BsonDocument; diff --git a/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java b/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java index bdd80562714..1ba51797bea 100644 --- a/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java +++ b/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java @@ -24,10 +24,10 @@ import com.mongodb.MongoClientSettings; import com.mongodb.MongoConfigurationException; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; -import com.mongodb.crypt.capi.MongoCryptOptions; import com.mongodb.internal.authentication.AwsCredentialHelper; import com.mongodb.internal.authentication.AzureCredentialHelper; import com.mongodb.internal.authentication.GcpCredentialHelper; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonDocumentWrapper; diff --git 
a/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java b/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java index edd0a4d958f..640707d94d3 100644 --- a/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java +++ b/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java @@ -17,7 +17,7 @@ import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RangeOptions; -import com.mongodb.crypt.capi.MongoExplicitEncryptOptions; +import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions; import org.bson.BsonDocument; import org.bson.BsonInt32; import org.bson.BsonInt64; diff --git a/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java b/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java index c7d82748efb..d76371775b9 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java @@ -21,7 +21,7 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoClientSettings; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; -import com.mongodb.crypt.capi.MongoCryptOptions; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; import org.bson.BsonDocument; import org.junit.jupiter.api.Test; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java index 0d15f5c970d..443ebbe14bd 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java @@ -22,7 +22,7 @@ import com.mongodb.MongoOperationTimeoutException; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; -import com.mongodb.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCrypt; import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.MongoClient; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java index 13d9373a3ff..dcfceedf155 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java @@ -24,13 +24,13 @@ import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; -import com.mongodb.crypt.capi.MongoCrypt; -import com.mongodb.crypt.capi.MongoCryptContext; import com.mongodb.crypt.capi.MongoCryptException; -import com.mongodb.crypt.capi.MongoDataKeyOptions; -import com.mongodb.crypt.capi.MongoKeyDecryptor; -import com.mongodb.crypt.capi.MongoRewrapManyDataKeyOptions; import com.mongodb.internal.capi.MongoCryptHelper; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import 
com.mongodb.internal.crypt.capi.MongoRewrapManyDataKeyOptions; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.time.Timeout; @@ -48,8 +48,8 @@ import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.crypt.capi.MongoCryptContext.State; import static com.mongodb.internal.client.vault.EncryptOptionsHelper.asMongoExplicitEncryptOptions; +import static com.mongodb.internal.crypt.capi.MongoCryptContext.State; /** *
This class is not part of the public API and may be removed or changed at any time
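The rename hunks above move the libmongocrypt bindings from com.mongodb.crypt.capi into com.mongodb.internal.crypt.capi, so only driver-internal code should depend on them; MongoCryptException is left in the old public package and (as its hunk further below shows) gains a public (message, code) constructor so the relocated internals can still raise it. A minimal sketch of a driver-internal call site after the move, assuming MongoCrypts.create and MongoCryptOptions.builder() keep their existing shapes (neither is shown in this patch):

    import com.mongodb.crypt.capi.MongoCryptException;        // unchanged public package
    import com.mongodb.internal.crypt.capi.MongoCrypt;        // previously com.mongodb.crypt.capi.MongoCrypt
    import com.mongodb.internal.crypt.capi.MongoCryptOptions;
    import com.mongodb.internal.crypt.capi.MongoCrypts;

    final class CryptPackageMoveSketch {
        // Creation is unchanged apart from the package; the factory and builder are assumed, not shown here.
        static MongoCrypt createMongoCrypt() {
            return MongoCrypts.create(MongoCryptOptions.builder().build());
        }

        // The exception stays public; this mirrors how the internal classes below now construct it
        // from a libmongocrypt status message and status code.
        static MongoCryptException fromStatus(final String statusMessage, final int statusCode) {
            return new MongoCryptException(statusMessage, statusCode);
        }
    }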
      diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java index d59b1e03696..b06af01d476 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java @@ -21,8 +21,8 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoClientSettings; import com.mongodb.MongoNamespace; -import com.mongodb.crypt.capi.MongoCrypt; -import com.mongodb.crypt.capi.MongoCrypts; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCrypts; import com.mongodb.reactivestreams.client.MongoClient; import com.mongodb.reactivestreams.client.MongoClients; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java index 465ffc02e80..019445e6cde 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java @@ -24,7 +24,6 @@ import com.mongodb.connection.AsyncCompletionHandler; import com.mongodb.connection.SocketSettings; import com.mongodb.connection.SslSettings; -import com.mongodb.crypt.capi.MongoKeyDecryptor; import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.AsynchronousChannelStream; @@ -33,6 +32,7 @@ import com.mongodb.internal.connection.Stream; import com.mongodb.internal.connection.StreamFactory; import com.mongodb.internal.connection.TlsChannelStreamFactoryFactory; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.time.Timeout; diff --git a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java index 9e2d7b3889b..73eed8efd01 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java +++ b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java @@ -26,7 +26,7 @@ import com.mongodb.client.MongoClient; import com.mongodb.client.MongoClients; import com.mongodb.client.MongoDatabase; -import com.mongodb.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCrypt; import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.RawBsonDocument; diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java index 990f196f62c..b910f0ab01c 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java @@ -23,13 +23,13 @@ import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; -import com.mongodb.crypt.capi.MongoCrypt; -import com.mongodb.crypt.capi.MongoCryptContext; import com.mongodb.crypt.capi.MongoCryptException; -import com.mongodb.crypt.capi.MongoDataKeyOptions; -import 
com.mongodb.crypt.capi.MongoKeyDecryptor; -import com.mongodb.crypt.capi.MongoRewrapManyDataKeyOptions; import com.mongodb.internal.capi.MongoCryptHelper; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.crypt.capi.MongoRewrapManyDataKeyOptions; import com.mongodb.internal.time.Timeout; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; @@ -46,8 +46,8 @@ import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.crypt.capi.MongoCryptContext.State; import static com.mongodb.internal.client.vault.EncryptOptionsHelper.asMongoExplicitEncryptOptions; +import static com.mongodb.internal.crypt.capi.MongoCryptContext.State; import static com.mongodb.internal.thread.InterruptionUtil.translateInterruptedException; /** diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java index 55274fcc786..30319bbf4f8 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java @@ -22,8 +22,8 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.MongoClient; import com.mongodb.client.MongoClients; -import com.mongodb.crypt.capi.MongoCrypt; -import com.mongodb.crypt.capi.MongoCrypts; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCrypts; import javax.net.ssl.SSLContext; import java.util.Map; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java index 63074e20bc9..f2fdaeb7699 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java @@ -18,18 +18,24 @@ package com.mongodb.crypt.capi; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; - -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_code; -import static org.bson.assertions.Assertions.isTrue; - /** - * Top level Exception for all Mongo Crypt CAPI exceptions + * Exception thrown for errors originating in the mongodb-crypt module. */ public class MongoCryptException extends RuntimeException { private static final long serialVersionUID = -5524416583514807953L; private final int code; + /** + * Construct an instance + * + * @param message the message + * @param code the code + */ + public MongoCryptException(final String message, final int code) { + super(message); + this.code = code; + } + /** * @param msg the message */ @@ -47,17 +53,6 @@ public MongoCryptException(final String msg, final Throwable cause) { this.code = -1; } - /** - * Construct an instance from a {@code mongocrypt_status_t}. - * - * @param status the status - */ - MongoCryptException(final mongocrypt_status_t status) { - super(CAPI.mongocrypt_status_message(status, null).toString()); - isTrue("status not ok", !CAPI.mongocrypt_status_ok(status)); - code = mongocrypt_status_code(status); - } - /** * @return the error code for the exception. 
*/ diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/BinaryHolder.java similarity index 87% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/BinaryHolder.java index 60570bd1180..14c7c7b29b6 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/BinaryHolder.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/BinaryHolder.java @@ -15,11 +15,11 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_destroy; // Wrap JNA memory and a mongocrypt_binary_t that references that memory, in order to ensure that the JNA Memory is not GC'd before the // mongocrypt_binary_t is destroyed diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPI.java similarity index 99% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPI.java index d6567bdaf7c..b8e2cacc677 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPI.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPI.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import com.sun.jna.Callback; import com.sun.jna.Memory; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPIHelper.java similarity index 94% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPIHelper.java index c1de63e8c8c..ba612e1d217 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CAPIHelper.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPIHelper.java @@ -15,9 +15,9 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; import com.sun.jna.Pointer; import org.bson.BsonBinaryWriter; import org.bson.BsonDocument; @@ -31,7 +31,7 @@ import java.nio.ByteBuffer; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_new_from_data; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_new_from_data; import static java.lang.String.format; final class CAPIHelper { diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CipherCallback.java similarity index 83% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CipherCallback.java index b10c0f21c67..2e4888d9857 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/CipherCallback.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CipherCallback.java @@ -15,12 +15,12 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.cstring; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import 
com.mongodb.crypt.capi.CAPI.mongocrypt_crypto_fn; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_crypto_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; import com.sun.jna.Pointer; import javax.crypto.Cipher; @@ -30,10 +30,10 @@ import java.security.NoSuchAlgorithmException; import java.util.concurrent.ConcurrentLinkedDeque; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; -import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; -import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; class CipherCallback implements mongocrypt_crypto_fn { private final String algorithm; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/DisposableMemory.java similarity index 95% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/DisposableMemory.java index fdcfb268fea..924b1cc90b1 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/DisposableMemory.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/DisposableMemory.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import com.sun.jna.Memory; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/JULLogger.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/JULLogger.java index 9a53e850d15..43c15bbf489 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/JULLogger.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/JULLogger.java @@ -16,7 +16,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import java.util.logging.Level; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Logger.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Logger.java index 38e82c235b8..e3ea361af4d 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/Logger.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Logger.java @@ -16,7 +16,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; /** * Not part of the public API diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Loggers.java similarity index 96% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Loggers.java index c57cd3994e4..a5ce431fbcf 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/Loggers.java +++ 
b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Loggers.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; /** * This class is not part of the public API. diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MacCallback.java similarity index 73% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MacCallback.java index 2ea09550bb4..98a0e833faa 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MacCallback.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MacCallback.java @@ -15,21 +15,21 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.cstring; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_hmac_fn; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_hmac_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; import com.sun.jna.Pointer; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; -import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; -import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; class MacCallback implements mongocrypt_hmac_fn { private final String algorithm; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MessageDigestCallback.java similarity index 71% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MessageDigestCallback.java index 861290d0a8f..35e6a8f78ed 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MessageDigestCallback.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MessageDigestCallback.java @@ -15,20 +15,20 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.cstring; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_hash_fn; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_hash_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; import com.sun.jna.Pointer; import java.security.MessageDigest; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; -import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; -import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; +import static 
com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; class MessageDigestCallback implements mongocrypt_hash_fn { diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoAwsKmsProviderOptions.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoAwsKmsProviderOptions.java index 4824197510d..d37f0b7f91f 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoAwsKmsProviderOptions.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoAwsKmsProviderOptions.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import static org.bson.assertions.Assertions.notNull; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypt.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypt.java index 74816dbe42c..506b6428d8b 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypt.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypt.java @@ -16,7 +16,7 @@ */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import org.bson.BsonDocument; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContext.java similarity index 97% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContext.java index 2c3aa250b87..573e1cdf881 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContext.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContext.java @@ -15,8 +15,9 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; +import com.mongodb.crypt.capi.MongoCryptException; import org.bson.BsonDocument; import org.bson.RawBsonDocument; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContextImpl.java similarity index 68% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContextImpl.java index 34aaafe7344..502784fdb72 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptContextImpl.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContextImpl.java @@ -15,31 +15,34 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_t; +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_t; +import 
com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_t; import org.bson.BsonDocument; import org.bson.RawBsonDocument; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_new; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_finalize; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_kms_done; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_mongo_done; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_mongo_feed; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_mongo_op; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_next_kms_ctx; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_provide_kms_providers; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_state; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_status; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_new; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; -import static com.mongodb.crypt.capi.CAPIHelper.toBinary; -import static com.mongodb.crypt.capi.CAPIHelper.toDocument; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_finalize; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_kms_done; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_mongo_done; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_mongo_feed; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_mongo_op; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_next_kms_ctx; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_provide_kms_providers; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_state; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_status; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_code; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toDocument; import static org.bson.assertions.Assertions.isTrue; import static org.bson.assertions.Assertions.notNull; @@ -153,7 +156,8 @@ public void close() { static void throwExceptionFromStatus(final mongocrypt_ctx_t wrapped) { mongocrypt_status_t status = mongocrypt_status_new(); mongocrypt_ctx_status(wrapped, status); - MongoCryptException e = new MongoCryptException(status); + MongoCryptException e = new MongoCryptException(mongocrypt_status_message(status, null).toString(), + mongocrypt_status_code(status)); mongocrypt_status_destroy(status); throw e; } diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptImpl.java similarity index 79% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java rename to 
mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptImpl.java index 2949e2a11e4..37f2263da69 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptImpl.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptImpl.java @@ -15,13 +15,14 @@ * */ -package com.mongodb.crypt.capi; - -import com.mongodb.crypt.capi.CAPI.cstring; -import com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_log_fn_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_t; +package com.mongodb.internal.crypt.capi; + +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_log_fn_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_t; import com.sun.jna.Pointer; import org.bson.BsonBinary; import org.bson.BsonDocument; @@ -33,49 +34,51 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_ERROR; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_FATAL; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_INFO; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_TRACE; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_WARNING; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_crypt_shared_lib_version_string; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_datakey_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_decrypt_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_encrypt_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_explicit_decrypt_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_explicit_encrypt_expression_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_explicit_encrypt_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_new; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_rewrap_many_datakey_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm_range; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_contention_factor; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_alt_name; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_encryption_key; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_id; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_material; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_ctx_setopt_query_type; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_init; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_is_crypto_available; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_new; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_aes_256_ctr; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_bypass_query_analysis; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5; -import static 
com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_crypto_hooks; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_encrypted_field_config_map; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_aws; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_local; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_kms_providers; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_log_handler; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_schema_map; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_set_crypt_shared_lib_path_override; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_setopt_use_need_kms_credentials_state; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_new; -import static com.mongodb.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_ERROR; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_FATAL; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_INFO; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_TRACE; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_WARNING; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_crypt_shared_lib_version_string; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_datakey_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_decrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_encrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_explicit_decrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_explicit_encrypt_expression_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_explicit_encrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_rewrap_many_datakey_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm_range; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_contention_factor; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_alt_name; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_encryption_key; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_id; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_material; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_query_type; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_is_crypto_available; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_aes_256_ctr; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_bypass_query_analysis; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5; 
+import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_crypto_hooks; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_encrypted_field_config_map; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_aws; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_local; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_kms_providers; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_log_handler; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_schema_map; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_set_crypt_shared_lib_path_override; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_use_need_kms_credentials_state; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_code; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toBinary; import static org.bson.assertions.Assertions.isTrue; import static org.bson.assertions.Assertions.notNull; @@ -395,7 +398,8 @@ private void configure(final Supplier successSupplier, final mongocrypt private void throwExceptionFromStatus() { mongocrypt_status_t status = mongocrypt_status_new(); mongocrypt_status(wrapped, status); - MongoCryptException e = new MongoCryptException(status); + MongoCryptException e = new MongoCryptException(mongocrypt_status_message(status, null).toString(), + mongocrypt_status_code(status)); mongocrypt_status_destroy(status); throw e; } diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptOptions.java similarity index 99% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptOptions.java index dc65bbdd9ae..46c9898a9a1 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptOptions.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptOptions.java @@ -15,9 +15,10 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import org.bson.BsonDocument; + import java.util.List; import java.util.Map; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypts.java similarity index 96% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypts.java index 683dcdf90f1..58739043627 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCrypts.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypts.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; /** * The entry point to the MongoCrypt library. 
diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoDataKeyOptions.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoDataKeyOptions.java index 27f62514aeb..6ec24954475 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoDataKeyOptions.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoDataKeyOptions.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import org.bson.BsonDocument; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoExplicitEncryptOptions.java similarity index 99% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoExplicitEncryptOptions.java index 2dad2182e7d..9080a773747 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoExplicitEncryptOptions.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoExplicitEncryptOptions.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import org.bson.BsonBinary; import org.bson.BsonDocument; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptor.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptor.java index 43a724348d6..9b0eae6776f 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptor.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptor.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import java.nio.ByteBuffer; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptorImpl.java similarity index 63% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptorImpl.java index cef14bf855f..1411adffc21 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoKeyDecryptorImpl.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptorImpl.java @@ -15,28 +15,31 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; import com.sun.jna.Pointer; import com.sun.jna.ptr.PointerByReference; import java.nio.ByteBuffer; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_binary_new; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_bytes_needed; -import static 
com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_endpoint; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_feed; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_get_kms_provider; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_message; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_kms_ctx_status; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_destroy; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_new; -import static com.mongodb.crypt.capi.CAPIHelper.toBinary; -import static com.mongodb.crypt.capi.CAPIHelper.toByteBuffer; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_bytes_needed; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_endpoint; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_feed; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_get_kms_provider; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_status; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_code; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteBuffer; import static org.bson.assertions.Assertions.notNull; class MongoKeyDecryptorImpl implements MongoKeyDecryptor { @@ -96,7 +99,8 @@ public void feed(final ByteBuffer bytes) { private void throwExceptionFromStatus() { mongocrypt_status_t status = mongocrypt_status_new(); mongocrypt_kms_ctx_status(wrapped, status); - MongoCryptException e = new MongoCryptException(status); + MongoCryptException e = new MongoCryptException(mongocrypt_status_message(status, null).toString(), + mongocrypt_status_code(status)); mongocrypt_status_destroy(status); throw e; } diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoLocalKmsProviderOptions.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoLocalKmsProviderOptions.java index be8eef09573..d2a975b8fae 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoLocalKmsProviderOptions.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoLocalKmsProviderOptions.java @@ -15,7 +15,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import java.nio.ByteBuffer; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoRewrapManyDataKeyOptions.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoRewrapManyDataKeyOptions.java index 0bfc6defa63..84c5031d635 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoRewrapManyDataKeyOptions.java +++ 
b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoRewrapManyDataKeyOptions.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import org.bson.BsonDocument; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SLF4JLogger.java similarity index 98% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SLF4JLogger.java index 23064f8bf85..2ed00d74562 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SLF4JLogger.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SLF4JLogger.java @@ -16,7 +16,7 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; import org.slf4j.LoggerFactory; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SecureRandomCallback.java similarity index 72% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SecureRandomCallback.java index 0a2a83c02f7..215f453f923 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SecureRandomCallback.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SecureRandomCallback.java @@ -15,19 +15,19 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.cstring; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_random_fn; -import com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_random_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; import com.sun.jna.Pointer; import java.security.SecureRandom; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; -import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; class SecureRandomCallback implements mongocrypt_random_fn { private final SecureRandom secureRandom; diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SigningRSAESPKCSCallback.java similarity index 80% rename from mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java rename to mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SigningRSAESPKCSCallback.java index a5b7ac9f050..12717a466c9 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/SigningRSAESPKCSCallback.java +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SigningRSAESPKCSCallback.java @@ -15,12 +15,12 @@ * */ -package com.mongodb.crypt.capi; +package com.mongodb.internal.crypt.capi; -import com.mongodb.crypt.capi.CAPI.cstring; -import com.mongodb.crypt.capi.CAPI.mongocrypt_binary_t; -import com.mongodb.crypt.capi.CAPI.mongocrypt_hmac_fn; -import 
com.mongodb.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_hmac_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; import com.sun.jna.Pointer; import java.security.InvalidKeyException; @@ -33,10 +33,10 @@ import java.security.spec.KeySpec; import java.security.spec.PKCS8EncodedKeySpec; -import static com.mongodb.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; -import static com.mongodb.crypt.capi.CAPI.mongocrypt_status_set; -import static com.mongodb.crypt.capi.CAPIHelper.toByteArray; -import static com.mongodb.crypt.capi.CAPIHelper.writeByteArrayToBinary; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; class SigningRSAESPKCSCallback implements mongocrypt_hmac_fn { diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java new file mode 100644 index 00000000000..5789855267d --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +/** + * The mongocrypt internal package + */ +package com.mongodb.internal.crypt.capi; diff --git a/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java b/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java index 87fbab2e82f..32e87714bb7 100644 --- a/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java +++ b/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java @@ -17,7 +17,16 @@ package com.mongodb.crypt.capi; -import com.mongodb.crypt.capi.MongoCryptContext.State; +import com.mongodb.internal.crypt.capi.MongoAwsKmsProviderOptions; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoCryptContext.State; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; +import com.mongodb.internal.crypt.capi.MongoCrypts; +import com.mongodb.internal.crypt.capi.MongoDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.crypt.capi.MongoLocalKmsProviderOptions; import org.bson.BsonBinary; import org.bson.BsonBinarySubType; import org.bson.BsonDocument; From 22cc6d0322c211940463e04d4b8ec197bc6b3459 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Mon, 23 Sep 2024 19:44:58 +0100 Subject: [PATCH 80/90] Remove Beta annotation for vector search (#1496) JAVA-5578 --- driver-core/src/main/com/mongodb/client/model/Aggregates.java | 1 - driver-core/src/main/com/mongodb/client/model/Projections.java | 1 - .../client/model/search/ApproximateVectorSearchOptions.java | 3 --- .../mongodb/client/model/search/ExactVectorSearchOptions.java | 3 --- .../com/mongodb/client/model/search/VectorSearchOptions.java | 3 --- .../src/main/scala/org/mongodb/scala/model/Aggregates.scala | 1 - .../org/mongodb/scala/model/search/VectorSearchOptions.scala | 1 - 7 files changed, 13 deletions(-) diff --git a/driver-core/src/main/com/mongodb/client/model/Aggregates.java b/driver-core/src/main/com/mongodb/client/model/Aggregates.java index 152cacc659b..7d521ed44a2 100644 --- a/driver-core/src/main/com/mongodb/client/model/Aggregates.java +++ b/driver-core/src/main/com/mongodb/client/model/Aggregates.java @@ -955,7 +955,6 @@ public static Bson searchMeta(final SearchCollector collector, final SearchOptio * @mongodb.server.release 6.0.11 * @since 4.11 */ - @Beta(Reason.SERVER) public static Bson vectorSearch( final FieldSearchPath path, final Iterable queryVector, diff --git a/driver-core/src/main/com/mongodb/client/model/Projections.java b/driver-core/src/main/com/mongodb/client/model/Projections.java index 18cda97c62b..11bcfcffc62 100644 --- a/driver-core/src/main/com/mongodb/client/model/Projections.java +++ b/driver-core/src/main/com/mongodb/client/model/Projections.java @@ -224,7 +224,6 @@ public static Bson metaSearchScore(final String fieldName) { * @mongodb.server.release 6.0.10 * @since 4.11 */ - @Beta(Reason.SERVER) public static Bson metaVectorSearchScore(final String fieldName) { return meta(fieldName, "vectorSearchScore"); } diff --git a/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java index 04faa18769e..d8e920990f9 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java +++ 
b/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java @@ -16,8 +16,6 @@ package com.mongodb.client.model.search; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; @@ -32,6 +30,5 @@ * @since 5.2 */ @Sealed -@Beta(Reason.SERVER) public interface ApproximateVectorSearchOptions extends VectorSearchOptions { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java index ff8bf01e956..d58b69e5a37 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java @@ -16,8 +16,6 @@ package com.mongodb.client.model.search; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; @@ -33,6 +31,5 @@ * @since 5.2 */ @Sealed -@Beta(Reason.SERVER) public interface ExactVectorSearchOptions extends VectorSearchOptions { } diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java index f27a4a2828b..073c05b2371 100644 --- a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java @@ -15,8 +15,6 @@ */ package com.mongodb.client.model.search; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.annotations.Sealed; import com.mongodb.client.model.Aggregates; import com.mongodb.client.model.Filters; @@ -31,7 +29,6 @@ * @since 4.11 */ @Sealed -@Beta(Reason.SERVER) public interface VectorSearchOptions extends Bson { /** * Creates a new {@link VectorSearchOptions} with the filter specified. 
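With the Beta(Reason.SERVER) annotations removed above, $vectorSearch and its options types are plain public API in 5.2. A rough pipeline sketch follows; the field name, index name, limit, and numCandidates are placeholders, and the five-argument vectorSearch overload plus the VectorSearchOptions.approximateVectorSearchOptions factory are assumptions based on the 5.2 options classes touched in this patch, not something shown in it:

    import com.mongodb.client.model.Aggregates;
    import com.mongodb.client.model.Projections;
    import com.mongodb.client.model.search.SearchPath;
    import com.mongodb.client.model.search.VectorSearchOptions;
    import org.bson.conversions.Bson;

    import java.util.Arrays;
    import java.util.List;

    final class VectorSearchSketch {
        // Builds a $vectorSearch + $project pipeline over a hypothetical "embedding" field
        // indexed by a vector search index named "embedding_index".
        static List<Bson> pipeline(final List<Double> queryVector) {
            return Arrays.asList(
                    Aggregates.vectorSearch(
                            SearchPath.fieldPath("embedding"), queryVector, "embedding_index", 5,
                            VectorSearchOptions.approximateVectorSearchOptions(100)),
                    Aggregates.project(Projections.metaVectorSearchScore("score")));
        }
    }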
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala index ed08ad5d551..c7b8d120cf7 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala @@ -737,7 +737,6 @@ object Aggregates { * @note Requires MongoDB 6.0.10 or greater * @since 4.11 */ - @Beta(Array(Reason.SERVER)) def vectorSearch( path: FieldSearchPath, queryVector: Iterable[java.lang.Double], diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala index 0778399fc4b..6911ec0f653 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala @@ -25,7 +25,6 @@ import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOpt * @note Requires MongoDB 6.0.11, or greater * @since 4.11 */ -@Beta(Array(Reason.SERVER)) object VectorSearchOptions { /** From f60fd14c39e2f971e8e6b710c0bd59426a4f95ce Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 23 Sep 2024 12:47:31 -0600 Subject: [PATCH 81/90] Fix `javadoc` waring in `MongoCryptException` (#1504) --- .../src/main/com/mongodb/crypt/capi/MongoCryptException.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java index f2fdaeb7699..c3110297ae4 100644 --- a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java @@ -20,6 +20,8 @@ /** * Exception thrown for errors originating in the mongodb-crypt module. 
+ * + * @serial exclude */ public class MongoCryptException extends RuntimeException { private static final long serialVersionUID = -5524416583514807953L; From 161feebd9110ac656f551a0c0e63d0d76258890b Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 23 Sep 2024 13:49:57 -0600 Subject: [PATCH 82/90] Remove unused imports from `Aggregates`, `Projections` (#1505) --- driver-core/src/main/com/mongodb/client/model/Aggregates.java | 2 -- driver-core/src/main/com/mongodb/client/model/Projections.java | 2 -- 2 files changed, 4 deletions(-) diff --git a/driver-core/src/main/com/mongodb/client/model/Aggregates.java b/driver-core/src/main/com/mongodb/client/model/Aggregates.java index 7d521ed44a2..4bb3a03771c 100644 --- a/driver-core/src/main/com/mongodb/client/model/Aggregates.java +++ b/driver-core/src/main/com/mongodb/client/model/Aggregates.java @@ -17,8 +17,6 @@ package com.mongodb.client.model; import com.mongodb.MongoNamespace; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.model.densify.DensifyOptions; import com.mongodb.client.model.densify.DensifyRange; import com.mongodb.client.model.fill.FillOptions; diff --git a/driver-core/src/main/com/mongodb/client/model/Projections.java b/driver-core/src/main/com/mongodb/client/model/Projections.java index 11bcfcffc62..470c3cb7e4a 100644 --- a/driver-core/src/main/com/mongodb/client/model/Projections.java +++ b/driver-core/src/main/com/mongodb/client/model/Projections.java @@ -16,8 +16,6 @@ package com.mongodb.client.model; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.model.search.FieldSearchPath; import com.mongodb.client.model.search.SearchCollector; import com.mongodb.client.model.search.SearchCount; From a138393eb28be2034fef33e7536c272291faae63 Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Mon, 23 Sep 2024 22:17:29 +0100 Subject: [PATCH 83/90] Remove remaining beta annotations for encrypted fields. (#1503) Encrypted fields are used as part of Range encryption which is no longer in server preview. 
JAVA-5441 --- .../com/mongodb/MongoUpdatedEncryptedFieldsException.java | 3 --- .../com/mongodb/client/model/CreateCollectionOptions.java | 4 ---- .../mongodb/client/model/CreateEncryptedCollectionParams.java | 3 --- .../main/com/mongodb/client/model/DropCollectionOptions.java | 4 ---- .../main/com/mongodb/client/model/vault/EncryptOptions.java | 3 --- .../mongodb/reactivestreams/client/internal/crypt/Crypt.java | 3 --- 6 files changed, 20 deletions(-) diff --git a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java index c91a3c87fc5..6c4b10ac0bc 100644 --- a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java +++ b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java @@ -15,8 +15,6 @@ */ package com.mongodb; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import org.bson.BsonDocument; import static com.mongodb.assertions.Assertions.assertNotNull; @@ -27,7 +25,6 @@ * * @since 4.9 */ -@Beta(Reason.SERVER) public final class MongoUpdatedEncryptedFieldsException extends MongoClientException { private static final long serialVersionUID = 1; diff --git a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java index 31165688d4a..f0ea455607d 100644 --- a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java @@ -17,8 +17,6 @@ package com.mongodb.client.model; import com.mongodb.AutoEncryptionSettings; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -354,7 +352,6 @@ public CreateCollectionOptions changeStreamPreAndPostImagesOptions( * @since 4.7 * @mongodb.server.release 7.0 */ - @Beta(Reason.SERVER) @Nullable public Bson getEncryptedFields() { return encryptedFields; @@ -371,7 +368,6 @@ public Bson getEncryptedFields() { * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption * @mongodb.server.release 7.0 */ - @Beta(Reason.SERVER) public CreateCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) { this.encryptedFields = encryptedFields; return this; diff --git a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java index 537efdc1716..8df26cad913 100644 --- a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java +++ b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java @@ -16,8 +16,6 @@ package com.mongodb.client.model; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -29,7 +27,6 @@ * * @since 4.9 */ -@Beta(Reason.SERVER) public final class CreateEncryptedCollectionParams { private final String kmsProvider; @Nullable diff --git a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java index cf2dbca66c4..5ae247547b8 100644 --- a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java @@ 
-17,8 +17,6 @@ package com.mongodb.client.model; import com.mongodb.AutoEncryptionSettings; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.conversions.Bson; @@ -40,7 +38,6 @@ public class DropCollectionOptions { * @since 4.7 * @mongodb.server.release 7.0 */ - @Beta(Reason.SERVER) @Nullable public Bson getEncryptedFields() { return encryptedFields; @@ -57,7 +54,6 @@ public Bson getEncryptedFields() { * @mongodb.server.release 7.0 * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption */ - @Beta(Reason.SERVER) public DropCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) { this.encryptedFields = encryptedFields; return this; diff --git a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java index cfdf833e892..91f722e8e15 100644 --- a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java +++ b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java @@ -16,8 +16,6 @@ package com.mongodb.client.model.vault; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.lang.Nullable; import org.bson.BsonBinary; @@ -192,7 +190,6 @@ public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) { * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption */ @Nullable - @Beta(Reason.SERVER) public RangeOptions getRangeOptions() { return rangeOptions; } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java index dcfceedf155..17d82e32c49 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java @@ -19,8 +19,6 @@ import com.mongodb.MongoClientException; import com.mongodb.MongoException; import com.mongodb.MongoInternalException; -import com.mongodb.annotations.Beta; -import com.mongodb.annotations.Reason; import com.mongodb.client.model.vault.DataKeyOptions; import com.mongodb.client.model.vault.EncryptOptions; import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; @@ -191,7 +189,6 @@ public Mono encryptExplicitly(final BsonValue value, final EncryptOp * @since 4.9 * @mongodb.server.release 6.2 */ - @Beta(Reason.SERVER) public Mono encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout operationTimeout) { return executeStateMachine(() -> mongoCrypt.createEncryptExpressionContext(new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options)), operationTimeout From 49f7eb43b5ea9f9d7f2fb434d80956f489180606 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 23 Sep 2024 15:27:19 -0600 Subject: [PATCH 84/90] Fix OSGi crypt manifest entries (#1506) --- driver-core/build.gradle | 2 +- driver-reactive-streams/build.gradle | 1 + driver-sync/build.gradle | 1 + mongodb-crypt/build.gradle.kts | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/driver-core/build.gradle b/driver-core/build.gradle index 72cd74104f5..c23a24a9fb8 100644 --- a/driver-core/build.gradle +++ b/driver-core/build.gradle @@ -89,7 +89,7 @@ afterEvaluate { 'com.github.luben.zstd.*;resolution:=optional', 'org.slf4j.*;resolution:=optional', 
'jnr.unixsocket.*;resolution:=optional', - 'com.mongodb.crypt.capi.*;resolution:=optional', + 'com.mongodb.internal.crypt.capi.*;resolution:=optional', 'jdk.net.*;resolution:=optional', // Used by SocketStreamHelper & depends on JDK version 'org.bson.codecs.record.*;resolution:=optional', // Depends on JDK version 'org.bson.codecs.kotlin.*;resolution:=optional', diff --git a/driver-reactive-streams/build.gradle b/driver-reactive-streams/build.gradle index 5a08997e6e8..7b86f010484 100644 --- a/driver-reactive-streams/build.gradle +++ b/driver-reactive-streams/build.gradle @@ -75,6 +75,7 @@ afterEvaluate { jar.manifest.attributes['Bundle-SymbolicName'] = 'org.mongodb.driver-reactivestreams' jar.manifest.attributes['Import-Package'] = [ 'com.mongodb.crypt.capi.*;resolution:=optional', + 'com.mongodb.internal.crypt.capi.*;resolution:=optional', '*', ].join(',') } diff --git a/driver-sync/build.gradle b/driver-sync/build.gradle index eb10ef62ebf..1c2f3ac6c59 100644 --- a/driver-sync/build.gradle +++ b/driver-sync/build.gradle @@ -49,6 +49,7 @@ afterEvaluate { jar.manifest.attributes['Bundle-SymbolicName'] = 'org.mongodb.driver-sync' jar.manifest.attributes['Import-Package'] = [ 'com.mongodb.crypt.capi.*;resolution:=optional', + 'com.mongodb.internal.crypt.capi.*;resolution:=optional', '*', ].join(',') } diff --git a/mongodb-crypt/build.gradle.kts b/mongodb-crypt/build.gradle.kts index bf2fef544ff..6c07a315185 100644 --- a/mongodb-crypt/build.gradle.kts +++ b/mongodb-crypt/build.gradle.kts @@ -175,7 +175,7 @@ afterEvaluate { tasks.jar { manifest { attributes( - "-exportcontents" to "com.mongodb.crypt.capi.*;-noimport:=true", + "-exportcontents" to "com.mongodb.*;-noimport:=true", "Automatic-Module-Name" to "com.mongodb.crypt.capi", "Import-Package" to "org.slf4j.*;resolution:=optional,org.bson.*", "Bundle-Name" to "MongoCrypt", From 0630764488cef0f20aa3f37f6f4018fb6a959e04 Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 23 Sep 2024 15:41:32 -0600 Subject: [PATCH 85/90] Fix `scaladoc` warnings (#1507) --- driver-scala/build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/driver-scala/build.gradle b/driver-scala/build.gradle index f9852968f05..4490ed39538 100644 --- a/driver-scala/build.gradle +++ b/driver-scala/build.gradle @@ -21,6 +21,7 @@ archivesBaseName = 'mongo-scala-driver' dependencies { implementation project(path: ':bson-scala', configuration: 'default') implementation project(path: ':driver-reactive-streams', configuration: 'default') + compileOnly 'com.google.code.findbugs:jsr305:1.3.9' testImplementation project(':driver-sync') testImplementation project(':bson').sourceSets.test.output From c717171237d5eac76984acc0d7d2e80619fc64bb Mon Sep 17 00:00:00 2001 From: Valentin Kovalenko Date: Mon, 23 Sep 2024 21:29:22 -0600 Subject: [PATCH 86/90] Fix `:mongodb-crypt` GraalVM metadata (#1508) --- .../META-INF/native-image/jni-config.json | 20 ++++++++-------- .../META-INF/native-image/reflect-config.json | 24 +++++++++---------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json b/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json index 44e398cb556..62ca1f8abae 100644 --- a/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json @@ -1,23 +1,23 @@ [ { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_crypto_fn", - 
"methods":[{"name":"crypt","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_crypto_fn", + "methods":[{"name":"crypt","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hash_fn", - "methods":[{"name":"hash","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hash_fn", + "methods":[{"name":"hash","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hmac_fn", - "methods":[{"name":"hmac","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hmac_fn", + "methods":[{"name":"hmac","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_log_fn_t", - "methods":[{"name":"log","parameterTypes":["int","com.mongodb.crypt.capi.CAPI$cstring","int","com.sun.jna.Pointer"] }] + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_log_fn_t", + "methods":[{"name":"log","parameterTypes":["int","com.mongodb.internal.crypt.capi.CAPI$cstring","int","com.sun.jna.Pointer"] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_random_fn", - "methods":[{"name":"random","parameterTypes":["com.sun.jna.Pointer","com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t","int","com.mongodb.crypt.capi.CAPI$mongocrypt_status_t"] }] + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_random_fn", + "methods":[{"name":"random","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","int","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] }, { "name":"com.sun.jna.Callback" diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json b/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json index 4187c0e8eab..c5ca33e6413 100644 --- a/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json @@ -1,56 +1,56 @@ [ { - "name":"com.mongodb.crypt.capi.CAPI", + "name":"com.mongodb.internal.crypt.capi.CAPI", "allPublicFields":true, 
"queryAllDeclaredMethods":true }, { - "name":"com.mongodb.crypt.capi.CAPI$cstring", + "name":"com.mongodb.internal.crypt.capi.CAPI$cstring", "methods":[{"name":"","parameterTypes":[] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_binary_t", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t", "methods":[{"name":"","parameterTypes":[] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_crypto_fn", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_crypto_fn", "queryAllDeclaredMethods":true, "queryAllPublicMethods":true }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_ctx_t", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_ctx_t", "methods":[{"name":"","parameterTypes":[] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hash_fn", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hash_fn", "queryAllDeclaredMethods":true, "queryAllPublicMethods":true }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_hmac_fn", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hmac_fn", "queryAllDeclaredMethods":true, "queryAllPublicMethods":true }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_kms_ctx_t", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_kms_ctx_t", "methods":[{"name":"","parameterTypes":[] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_log_fn_t", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_log_fn_t", "queryAllDeclaredMethods":true, "queryAllPublicMethods":true }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_random_fn", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_random_fn", "queryAllDeclaredMethods":true, "queryAllPublicMethods":true }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_status_t", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t", "methods":[{"name":"","parameterTypes":[] }] }, { - "name":"com.mongodb.crypt.capi.CAPI$mongocrypt_t", + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_t", "methods":[{"name":"","parameterTypes":[] }] }, { From a5dc4568a52e584d60b1c9a98b4eea32347abc7f Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Tue, 24 Sep 2024 17:16:24 +0100 Subject: [PATCH 87/90] Revert making BsonEncoder / BsonDecoder internal (#1510) Made BsonEncoder / Decoder internal as part of the JsonElement support, however, this reduces the flexibility of the API and that change should be reverted. 
JAVA-5623 --- .../org/bson/codecs/kotlinx/BsonDecoder.kt | 80 ++---------- .../org/bson/codecs/kotlinx/BsonEncoder.kt | 28 +---- .../codecs/kotlinx/KotlinSerializerCodec.kt | 6 +- .../codecs/kotlinx/utils/BsonCodecUtils.kt | 119 ++++++++++++++++++ 4 files changed, 135 insertions(+), 98 deletions(-) create mode 100644 bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt index 68ecbbabc13..99e5d2acb17 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt @@ -37,10 +37,11 @@ import org.bson.BsonType import org.bson.BsonValue import org.bson.codecs.BsonValueCodec import org.bson.codecs.DecoderContext -import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonArrayDecoder -import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonDocumentDecoder -import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonMapDecoder -import org.bson.codecs.kotlinx.BsonDecoder.Companion.createBsonPolymorphicDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonArrayDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonDocumentDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonMapDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonPolymorphicDecoder import org.bson.internal.NumberCodecHelper import org.bson.internal.StringCodecHelper import org.bson.types.ObjectId @@ -51,75 +52,12 @@ import org.bson.types.ObjectId * For custom serialization handlers */ @ExperimentalSerializationApi -internal sealed interface BsonDecoder : Decoder, CompositeDecoder { - - /** Factory helper for creating concrete BsonDecoder implementations */ - companion object { - - @Suppress("SwallowedException") - private val hasJsonDecoder: Boolean by lazy { - try { - Class.forName("kotlinx.serialization.json.JsonDecoder") - true - } catch (e: ClassNotFoundException) { - false - } - } - - fun createBsonDecoder( - reader: AbstractBsonReader, - serializersModule: SerializersModule, - configuration: BsonConfiguration - ): BsonDecoder { - return if (hasJsonDecoder) JsonBsonDecoderImpl(reader, serializersModule, configuration) - else BsonDecoderImpl(reader, serializersModule, configuration) - } - - fun createBsonArrayDecoder( - descriptor: SerialDescriptor, - reader: AbstractBsonReader, - serializersModule: SerializersModule, - configuration: BsonConfiguration - ): BsonArrayDecoder { - return if (hasJsonDecoder) JsonBsonArrayDecoder(descriptor, reader, serializersModule, configuration) - else BsonArrayDecoder(descriptor, reader, serializersModule, configuration) - } - - fun createBsonDocumentDecoder( - descriptor: SerialDescriptor, - reader: AbstractBsonReader, - serializersModule: SerializersModule, - configuration: BsonConfiguration - ): BsonDocumentDecoder { - return if (hasJsonDecoder) JsonBsonDocumentDecoder(descriptor, reader, serializersModule, configuration) - else BsonDocumentDecoder(descriptor, reader, serializersModule, configuration) - } - - fun createBsonPolymorphicDecoder( - descriptor: SerialDescriptor, - reader: AbstractBsonReader, - serializersModule: SerializersModule, - configuration: BsonConfiguration - ): BsonPolymorphicDecoder { - return if (hasJsonDecoder) JsonBsonPolymorphicDecoder(descriptor, reader, 
serializersModule, configuration) - else BsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration) - } - - fun createBsonMapDecoder( - descriptor: SerialDescriptor, - reader: AbstractBsonReader, - serializersModule: SerializersModule, - configuration: BsonConfiguration - ): BsonMapDecoder { - return if (hasJsonDecoder) JsonBsonMapDecoder(descriptor, reader, serializersModule, configuration) - else BsonMapDecoder(descriptor, reader, serializersModule, configuration) - } - } +public sealed interface BsonDecoder : Decoder, CompositeDecoder { /** @return the decoded ObjectId */ - fun decodeObjectId(): ObjectId + public fun decodeObjectId(): ObjectId /** @return the decoded BsonValue */ - fun decodeBsonValue(): BsonValue + public fun decodeBsonValue(): BsonValue } @OptIn(ExperimentalSerializationApi::class) @@ -325,7 +263,7 @@ internal open class BsonPolymorphicDecoder( it.reset() mark = null } - return deserializer.deserialize(BsonDecoder.createBsonDecoder(reader, serializersModule, configuration)) + return deserializer.deserialize(createBsonDecoder(reader, serializersModule, configuration)) } override fun decodeElementIndex(descriptor: SerialDescriptor): Int { diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt index 899b1b7a981..1470bbb76a5 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt @@ -39,43 +39,21 @@ import org.bson.types.ObjectId * For custom serialization handlers */ @ExperimentalSerializationApi -internal sealed interface BsonEncoder : Encoder, CompositeEncoder { - - /** Factory helper for creating concrete BsonEncoder implementations */ - companion object { - @Suppress("SwallowedException") - private val hasJsonEncoder: Boolean by lazy { - try { - Class.forName("kotlinx.serialization.json.JsonEncoder") - true - } catch (e: ClassNotFoundException) { - false - } - } - - fun createBsonEncoder( - writer: BsonWriter, - serializersModule: SerializersModule, - configuration: BsonConfiguration - ): BsonEncoder { - return if (hasJsonEncoder) JsonBsonEncoder(writer, serializersModule, configuration) - else BsonEncoderImpl(writer, serializersModule, configuration) - } - } +public sealed interface BsonEncoder : Encoder, CompositeEncoder { /** * Encodes an ObjectId * * @param value the ObjectId */ - fun encodeObjectId(value: ObjectId) + public fun encodeObjectId(value: ObjectId) /** * Encodes a BsonValue * * @param value the BsonValue */ - fun encodeBsonValue(value: BsonValue) + public fun encodeBsonValue(value: BsonValue) } /** diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt index 41e674568a5..0c7491b2278 100644 --- a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt @@ -34,6 +34,8 @@ import org.bson.codecs.Codec import org.bson.codecs.DecoderContext import org.bson.codecs.EncoderContext import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonEncoder import org.bson.codecs.pojo.annotations.BsonCreator import org.bson.codecs.pojo.annotations.BsonDiscriminator import 
org.bson.codecs.pojo.annotations.BsonExtraElements @@ -172,13 +174,13 @@ private constructor( } override fun encode(writer: BsonWriter, value: T, encoderContext: EncoderContext) { - serializer.serialize(BsonEncoder.createBsonEncoder(writer, serializersModule, bsonConfiguration), value) + serializer.serialize(createBsonEncoder(writer, serializersModule, bsonConfiguration), value) } override fun getEncoderClass(): Class = kClass.java override fun decode(reader: BsonReader, decoderContext: DecoderContext): T { require(reader is AbstractBsonReader) - return serializer.deserialize(BsonDecoder.createBsonDecoder(reader, serializersModule, bsonConfiguration)) + return serializer.deserialize(createBsonDecoder(reader, serializersModule, bsonConfiguration)) } } diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt new file mode 100644 index 00000000000..eabfebc5833 --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt @@ -0,0 +1,119 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlinx.utils + +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.modules.SerializersModule +import org.bson.AbstractBsonReader +import org.bson.BsonWriter +import org.bson.codecs.kotlinx.BsonArrayDecoder +import org.bson.codecs.kotlinx.BsonConfiguration +import org.bson.codecs.kotlinx.BsonDecoder +import org.bson.codecs.kotlinx.BsonDecoderImpl +import org.bson.codecs.kotlinx.BsonDocumentDecoder +import org.bson.codecs.kotlinx.BsonEncoder +import org.bson.codecs.kotlinx.BsonEncoderImpl +import org.bson.codecs.kotlinx.BsonMapDecoder +import org.bson.codecs.kotlinx.BsonPolymorphicDecoder +import org.bson.codecs.kotlinx.JsonBsonArrayDecoder +import org.bson.codecs.kotlinx.JsonBsonDecoderImpl +import org.bson.codecs.kotlinx.JsonBsonDocumentDecoder +import org.bson.codecs.kotlinx.JsonBsonEncoder +import org.bson.codecs.kotlinx.JsonBsonMapDecoder +import org.bson.codecs.kotlinx.JsonBsonPolymorphicDecoder + +@ExperimentalSerializationApi +internal object BsonCodecUtils { + + @Suppress("SwallowedException") + private val hasJsonEncoder: Boolean by lazy { + try { + Class.forName("kotlinx.serialization.json.JsonEncoder") + true + } catch (e: ClassNotFoundException) { + false + } + } + + @Suppress("SwallowedException") + private val hasJsonDecoder: Boolean by lazy { + try { + Class.forName("kotlinx.serialization.json.JsonDecoder") + true + } catch (e: ClassNotFoundException) { + false + } + } + + internal fun createBsonEncoder( + writer: BsonWriter, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonEncoder { + return if (hasJsonEncoder) JsonBsonEncoder(writer, serializersModule, configuration) + else BsonEncoderImpl(writer, serializersModule, configuration) + } + + 
internal fun createBsonDecoder( + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonDecoder { + return if (hasJsonDecoder) JsonBsonDecoderImpl(reader, serializersModule, configuration) + else BsonDecoderImpl(reader, serializersModule, configuration) + } + + internal fun createBsonArrayDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonArrayDecoder { + return if (hasJsonDecoder) JsonBsonArrayDecoder(descriptor, reader, serializersModule, configuration) + else BsonArrayDecoder(descriptor, reader, serializersModule, configuration) + } + + internal fun createBsonDocumentDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonDocumentDecoder { + return if (hasJsonDecoder) JsonBsonDocumentDecoder(descriptor, reader, serializersModule, configuration) + else BsonDocumentDecoder(descriptor, reader, serializersModule, configuration) + } + + internal fun createBsonPolymorphicDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonPolymorphicDecoder { + return if (hasJsonDecoder) JsonBsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration) + else BsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration) + } + + internal fun createBsonMapDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration + ): BsonMapDecoder { + return if (hasJsonDecoder) JsonBsonMapDecoder(descriptor, reader, serializersModule, configuration) + else BsonMapDecoder(descriptor, reader, serializersModule, configuration) + } +} From 525cc45b43c15cdccdf3572e97bd6aa78b3668d2 Mon Sep 17 00:00:00 2001 From: Ross Lawley <420+rozza@users.noreply.github.com> Date: Tue, 24 Sep 2024 16:37:59 +0000 Subject: [PATCH 88/90] Version: bump 5.2.0 --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index d9ebd912fb8..fd058b9487d 100644 --- a/build.gradle +++ b/build.gradle @@ -75,7 +75,7 @@ configure(coreProjects) { apply plugin: 'idea' group = 'org.mongodb' - version = '5.2.0-SNAPSHOT' + version = '5.2.0' repositories { mavenLocal() From 078a8fe60640e80b564853db309c9eb2ebf92a2b Mon Sep 17 00:00:00 2001 From: Ross Lawley <420+rozza@users.noreply.github.com> Date: Tue, 24 Sep 2024 16:37:59 +0000 Subject: [PATCH 89/90] Version: bump 5.2.1-SNAPSHOT --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index fd058b9487d..3a3e5810b20 100644 --- a/build.gradle +++ b/build.gradle @@ -75,7 +75,7 @@ configure(coreProjects) { apply plugin: 'idea' group = 'org.mongodb' - version = '5.2.0' + version = '5.2.1-SNAPSHOT' repositories { mavenLocal() From e34283d11e0624ced3ef60ea11970d16a377d2bd Mon Sep 17 00:00:00 2001 From: Ross Lawley Date: Tue, 24 Sep 2024 19:05:51 +0100 Subject: [PATCH 90/90] Version: revert to 5.2.0 --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 3a3e5810b20..fd058b9487d 100644 --- a/build.gradle +++ b/build.gradle @@ -75,7 +75,7 @@ configure(coreProjects) { apply plugin: 'idea' group = 'org.mongodb' - version = '5.2.1-SNAPSHOT' + version = '5.2.0' repositories { mavenLocal() pFad 
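
The most API-visible change in this series is PATCH 87/90, which makes the `org.bson.codecs.kotlinx` interfaces `BsonEncoder` and `BsonDecoder` public again so that custom serialization handlers can reach the BSON-specific calls (`encodeObjectId`/`decodeObjectId` and `encodeBsonValue`/`decodeBsonValue`) while the factory plumbing moves into the internal `BsonCodecUtils`. Below is a minimal sketch of how a custom `KSerializer` might use the reopened interfaces; the `ObjectIdAsBsonSerializer` name and the hex-string fallback for non-BSON formats are illustrative assumptions, not part of the patch.

import kotlinx.serialization.ExperimentalSerializationApi
import kotlinx.serialization.KSerializer
import kotlinx.serialization.descriptors.PrimitiveKind
import kotlinx.serialization.descriptors.PrimitiveSerialDescriptor
import kotlinx.serialization.descriptors.SerialDescriptor
import kotlinx.serialization.encoding.Decoder
import kotlinx.serialization.encoding.Encoder
import org.bson.codecs.kotlinx.BsonDecoder
import org.bson.codecs.kotlinx.BsonEncoder
import org.bson.types.ObjectId

// Hypothetical custom serializer: when the target format is BSON it uses the
// BSON-specific calls reopened by PATCH 87/90; otherwise it falls back to a hex string.
@OptIn(ExperimentalSerializationApi::class)
object ObjectIdAsBsonSerializer : KSerializer<ObjectId> {
    override val descriptor: SerialDescriptor =
        PrimitiveSerialDescriptor("ObjectIdAsBsonSerializer", PrimitiveKind.STRING)

    override fun serialize(encoder: Encoder, value: ObjectId) = when (encoder) {
        is BsonEncoder -> encoder.encodeObjectId(value)    // native BSON ObjectId
        else -> encoder.encodeString(value.toHexString())  // non-BSON formats, e.g. JSON
    }

    override fun deserialize(decoder: Decoder): ObjectId = when (decoder) {
        is BsonDecoder -> decoder.decodeObjectId()
        else -> ObjectId(decoder.decodeString())
    }
}

A property annotated with `@Serializable(with = ObjectIdAsBsonSerializer::class)` might then round-trip as a native ObjectId through `KotlinSerializerCodec` while still serializing as a plain string in other formats, without any casting in user code.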