diff --git a/mongo/integration/cmd_monitoring_helpers_test.go b/mongo/integration/cmd_monitoring_helpers_test.go
index 6c4931d26c..ef821d9e9a 100644
--- a/mongo/integration/cmd_monitoring_helpers_test.go
+++ b/mongo/integration/cmd_monitoring_helpers_test.go
@@ -82,7 +82,24 @@ func compareValues(mt *mtest.T, key string, expected, actual bson.RawValue) erro
 	if typeVal, err := e.LookupErr("$$type"); err == nil {
 		// $$type represents a type assertion
 		// for example {field: {$$type: "binData"}} should assert that "field" is an element with a binary value
-		return checkValueType(mt, key, actual.Type, typeVal.StringValue())
+		switch typ := typeVal.Type; typ {
+		case bson.TypeString:
+			return checkValueType(mt, key, actual.Type, typeVal.StringValue())
+		case bson.TypeArray:
+			array := typeVal.Array()
+			elems, err := array.Values()
+			if err != nil {
+				return err
+			}
+			for _, elem := range elems {
+				if checkValueType(mt, key, actual.Type, elem.StringValue()) == nil {
+					return nil
+				}
+			}
+			return fmt.Errorf("BSON type mismatch for key %q; expected %s, got %q", key, array, actual.Type)
+		default:
+			return fmt.Errorf("unsupported $$type: %q", typ)
+		}
 	}
 
 	a := actual.Document()
diff --git a/mongo/integration/json_helpers_test.go b/mongo/integration/json_helpers_test.go
index 463d1e54dc..51559debca 100644
--- a/mongo/integration/json_helpers_test.go
+++ b/mongo/integration/json_helpers_test.go
@@ -114,6 +114,8 @@ func createClientOptions(t testing.TB, opts bson.Raw) *options.ClientOptions {
 		case "socketTimeoutMS":
 			st := convertValueToMilliseconds(t, opt)
 			clientOpts.SetSocketTimeout(st)
+		case "timeoutMS":
+			clientOpts.SetTimeout(time.Duration(opt.Int32()) * time.Millisecond)
 		case "minPoolSize":
 			clientOpts.SetMinPoolSize(uint64(opt.AsInt64()))
 		case "maxPoolSize":
@@ -470,8 +472,9 @@ func errorFromResult(t testing.TB, result interface{}) *operationError {
 	if err != nil {
 		return nil
 	}
-	if expected.ErrorCodeName == nil && expected.ErrorContains == nil && len(expected.ErrorLabelsOmit) == 0 &&
-		len(expected.ErrorLabelsContain) == 0 {
+	if expected.ErrorCodeName == nil && expected.ErrorContains == nil &&
+		len(expected.ErrorLabelsOmit) == 0 && len(expected.ErrorLabelsContain) == 0 &&
+		expected.IsTimeoutError == nil {
 		return nil
 	}
 
@@ -563,6 +566,13 @@ func verifyError(expected *operationError, actual error) error {
 			return fmt.Errorf("expected error %w to not contain label %q", actual, label)
 		}
 	}
+	if expected.IsTimeoutError != nil {
+		isTimeoutError := mongo.IsTimeout(actual)
+		if *expected.IsTimeoutError != isTimeoutError {
+			return fmt.Errorf("expected error %w to be a timeout error: %v, is timeout error: %v",
+				actual, *expected.IsTimeoutError, isTimeoutError)
+		}
+	}
 	return nil
 }
diff --git a/mongo/integration/mtest/mongotest.go b/mongo/integration/mtest/mongotest.go
index 41292e67a3..fb0833ba32 100644
--- a/mongo/integration/mtest/mongotest.go
+++ b/mongo/integration/mtest/mongotest.go
@@ -84,6 +84,11 @@ type WriteConcernErrorData struct {
 	ErrInfo bson.Raw `bson:"errInfo,omitempty"`
 }
 
+type failPoint struct {
+	name   string
+	client *mongo.Client
+}
+
 // T is a wrapper around testing.T.
 type T struct {
 	// connsCheckedOut is the net number of connections checked out during test execution.
@@ -103,7 +108,7 @@ type T struct {
 	createdColls     []*Collection // collections created in this test
 	proxyDialer      *proxyDialer
 	dbName, collName string
-	failPointNames   []string
+	failPoints       []failPoint
 	minServerVersion string
 	maxServerVersion string
 	validTopologies  []TopologyKind
@@ -128,14 +133,16 @@ type T struct {
 	succeeded []*event.CommandSucceededEvent
 	failed    []*event.CommandFailedEvent
 
-	Client *mongo.Client
-	DB     *mongo.Database
-	Coll   *mongo.Collection
+	Client    *mongo.Client
+	fpClients map[*mongo.Client]bool
+	DB        *mongo.Database
+	Coll      *mongo.Collection
 }
 
 func newT(wrapped *testing.T, opts ...*Options) *T {
 	t := &T{
-		T: wrapped,
+		T:         wrapped,
+		fpClients: make(map[*mongo.Client]bool),
 	}
 	for _, opt := range opts {
 		for _, optFn := range opt.optFuncs {
@@ -202,6 +209,12 @@ func (t *T) cleanup() {
 	// always disconnect the client regardless of clientType because Client.Disconnect will work against
 	// all deployments
 	_ = t.Client.Disconnect(context.Background())
+	for client, v := range t.fpClients {
+		if v {
+			_ = client.Disconnect(context.Background())
+		}
+	}
+	t.fpClients = nil
 }
 
 // Run creates a new T instance for a sub-test and runs the given callback. It also creates a new collection using the
@@ -254,9 +267,11 @@ func (t *T) RunOpts(name string, opts *Options, callback func(mt *T)) {
 			sub.ClearFailPoints()
 			sub.ClearCollections()
 		}
-		// only disconnect client if it's not being shared
+		// only disconnect client if it's not being shared and not used by fail points.
 		if sub.shareClient == nil || !*sub.shareClient {
-			_ = sub.Client.Disconnect(context.Background())
+			if _, ok := sub.fpClients[sub.Client]; !ok {
+				_ = sub.Client.Disconnect(context.Background())
+			}
 		}
 		assert.Equal(sub, 0, sessions, "%v sessions checked out", sessions)
 		assert.Equal(sub, 0, conns, "%v connections checked out", conns)
@@ -405,7 +420,10 @@ func (t *T) ResetClient(opts *options.ClientOptions) {
 		t.clientOpts = opts
 	}
 
-	_ = t.Client.Disconnect(context.Background())
+	// Disconnect client if it is not being used by fail points.
+	if _, ok := t.fpClients[t.Client]; !ok {
+		_ = t.Client.Disconnect(context.Background())
+	}
 	t.createTestClient()
 	t.DB = t.Client.Database(t.dbName)
 	t.Coll = t.DB.Collection(t.collName, t.collOpts)
@@ -562,7 +580,8 @@ func (t *T) SetFailPoint(fp FailPoint) {
 	if err := SetFailPoint(fp, t.Client); err != nil {
 		t.Fatal(err)
 	}
-	t.failPointNames = append(t.failPointNames, fp.ConfigureFailPoint)
+	t.fpClients[t.Client] = true
+	t.failPoints = append(t.failPoints, failPoint{fp.ConfigureFailPoint, t.Client})
 }
 
 // SetFailPointFromDocument sets the fail point represented by the given document for the client associated with T. This
@@ -574,30 +593,37 @@ func (t *T) SetFailPointFromDocument(fp bson.Raw) {
 		t.Fatal(err)
 	}
 
+	t.fpClients[t.Client] = true
 	name := fp.Index(0).Value().StringValue()
-	t.failPointNames = append(t.failPointNames, name)
+	t.failPoints = append(t.failPoints, failPoint{name, t.Client})
 }
 
 // TrackFailPoint adds the given fail point to the list of fail points to be disabled when the current test finishes.
 // This function does not create a fail point on the server.
-func (t *T) TrackFailPoint(fpName string) {
-	t.failPointNames = append(t.failPointNames, fpName)
+func (t *T) TrackFailPoint(fpName string, client *mongo.Client) {
+	t.fpClients[client] = true
+	t.failPoints = append(t.failPoints, failPoint{fpName, client})
 }
 
 // ClearFailPoints disables all previously set failpoints for this test.
 func (t *T) ClearFailPoints() {
-	db := t.Client.Database("admin")
-	for _, fp := range t.failPointNames {
+	for _, fp := range t.failPoints {
 		cmd := bson.D{
-			{"configureFailPoint", fp},
+			{"configureFailPoint", fp.name},
 			{"mode", "off"},
 		}
-		err := db.RunCommand(context.Background(), cmd).Err()
+		err := fp.client.Database("admin").RunCommand(context.Background(), cmd).Err()
 		if err != nil {
-			t.Fatalf("error clearing fail point %s: %v", fp, err)
+			t.Fatalf("error clearing fail point %s: %v", fp.name, err)
 		}
+		t.fpClients[fp.client] = false
 	}
-	t.failPointNames = t.failPointNames[:0]
+	for client, active := range t.fpClients {
+		if !active && client != t.Client {
+			_ = client.Disconnect(context.Background())
+		}
+	}
+	t.failPoints = t.failPoints[:0]
 }
 
 // CloneDatabase modifies the default database for this test to match the given options.
@@ -684,19 +710,17 @@ func (t *T) createTestClient() {
 		})
 	}
 
-	var err error
+	var uriOpts *options.ClientOptions
 	switch t.clientType {
 	case Pinned:
 		// pin to first mongos
 		pinnedHostList := []string{testContext.connString.Hosts[0]}
-		uriOpts := options.Client().ApplyURI(testContext.connString.Original).SetHosts(pinnedHostList)
-		t.Client, err = mongo.NewClient(uriOpts, clientOpts)
+		uriOpts = options.Client().ApplyURI(testContext.connString.Original).SetHosts(pinnedHostList)
 	case Mock:
 		// clear pool monitor to avoid configuration error
 		clientOpts.PoolMonitor = nil
 		t.mockDeployment = newMockDeployment()
 		clientOpts.Deployment = t.mockDeployment
-		t.Client, err = mongo.NewClient(clientOpts)
 	case Proxy:
 		t.proxyDialer = newProxyDialer()
 		clientOpts.SetDialer(t.proxyDialer)
@@ -706,16 +730,17 @@ func (t *T) createTestClient() {
 	case Default:
 		// Use a different set of options to specify the URI because clientOpts may already have a URI or host seedlist
 		// specified.
-		var uriOpts *options.ClientOptions
 		if clientOpts.Deployment == nil {
 			// Only specify URI if the deployment is not set to avoid setting topology/server options along with the
 			// deployment.
 			uriOpts = options.Client().ApplyURI(testContext.connString.Original)
 		}
-
-		// Pass in uriOpts first so clientOpts wins if there are any conflicting settings.
-		t.Client, err = mongo.NewClient(uriOpts, clientOpts)
 	}
+	t.clientOpts = options.MergeClientOptions(uriOpts, clientOpts)
+
+	var err error
+	// uriOpts is passed to MergeClientOptions first so clientOpts wins if there are any conflicting settings.
+	t.Client, err = mongo.NewClient(t.clientOpts)
 	if err != nil {
 		t.Fatalf("error creating client: %v", err)
 	}
diff --git a/mongo/integration/unified_spec_test.go b/mongo/integration/unified_spec_test.go
index a28e9f6cf0..df2d705132 100644
--- a/mongo/integration/unified_spec_test.go
+++ b/mongo/integration/unified_spec_test.go
@@ -42,7 +42,7 @@ const (
 	gridFSFiles            = "fs.files"
 	gridFSChunks           = "fs.chunks"
 	spec1403SkipReason     = "servers less than 4.2 do not have mongocryptd; see SPEC-1403"
-	godriver2123SkipReason = "failpoints and timeouts together cause failures; see GODRIVER-2123"
+	godriver2466SkipReason = "test has not been updated; see GODRIVER-2466"
 	godriver2413SkipReason = "encryptedFields argument is not supported on Collection.Drop; see GODRIVER-2413"
 )
 
@@ -53,11 +53,8 @@ var (
 		// Currently, the test will fail because a server < 4.2 wouldn't have mongocryptd, so Client construction
 		// would fail with a mongocryptd spawn error.
 		"operation fails with maxWireVersion < 8": spec1403SkipReason,
-		// GODRIVER-2123: The two tests below use a failpoint and a socket or server selection timeout.
-		// The timeout causes the eventual clearing of the failpoint in the test runner to fail with an
-		// i/o timeout.
-		"Ignore network timeout error on find":            godriver2123SkipReason,
-		"Network error on minPoolSize background creation": godriver2123SkipReason,
+		// GODRIVER-2466: The test below has not been updated as required.
+		"Network error on minPoolSize background creation": godriver2466SkipReason,
 		"CreateCollection from encryptedFields.":     godriver2413SkipReason,
 		"DropCollection from encryptedFields":        godriver2413SkipReason,
 		"DropCollection from remote encryptedFields": godriver2413SkipReason,
@@ -175,6 +172,7 @@ type operationError struct {
 	ErrorCodeName      *string  `bson:"errorCodeName"`
 	ErrorLabelsContain []string `bson:"errorLabelsContain"`
 	ErrorLabelsOmit    []string `bson:"errorLabelsOmit"`
+	IsTimeoutError     *bool    `bson:"isTimeoutError"`
 }
 
 const dataPath string = "../../testdata/"
@@ -475,12 +473,11 @@ func executeTestRunnerOperation(mt *mtest.T, testCase *testCase, op *operation,
 		if err != nil {
 			return fmt.Errorf("Connect error for targeted client: %w", err)
 		}
-		defer func() { _ = client.Disconnect(context.Background()) }()
 
 		if err = client.Database("admin").RunCommand(context.Background(), fp).Err(); err != nil {
 			return fmt.Errorf("error setting targeted fail point: %w", err)
 		}
-		mt.TrackFailPoint(fp.ConfigureFailPoint)
+		mt.TrackFailPoint(fp.ConfigureFailPoint, client)
 	case "configureFailPoint":
 		fp, err := op.Arguments.LookupErr("failPoint")
 		if err != nil {
diff --git a/testdata/client-side-encryption/legacy/timeoutMS.json b/testdata/client-side-encryption/legacy/timeoutMS.json
new file mode 100644
index 0000000000..9eb0974d40
--- /dev/null
+++ b/testdata/client-side-encryption/legacy/timeoutMS.json
@@ -0,0 +1,200 @@
+{
+  "runOn": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "database_name": "cse-timeouts-db",
+  "collection_name": "cse-timeouts-coll",
+  "data": [],
+  "json_schema": {
+    "properties": {
+      "encrypted_w_altname": {
+        "encrypt": {
+          "keyId": "/altname",
+          "bsonType": "string",
+          "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
+        }
+      },
+      "encrypted_string": {
+        "encrypt": {
+          "keyId": [
+            {
+              "$binary": {
+                "base64": "AAAAAAAAAAAAAAAAAAAAAA==",
+                "subType": "04"
+              }
+            }
+          ],
+          "bsonType": "string",
+          "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
+        }
+      },
+      "random": {
+        "encrypt": {
+          "keyId": [
+            {
+              "$binary": {
+                "base64": "AAAAAAAAAAAAAAAAAAAAAA==",
+                "subType": "04"
+              }
+            }
+          ],
+          "bsonType": "string",
+          "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
+        }
+      },
+      "encrypted_string_equivalent": {
+        "encrypt": {
+          "keyId": [
+            {
+              "$binary": {
+                "base64": "AAAAAAAAAAAAAAAAAAAAAA==",
+                "subType": "04"
+              }
+            }
+          ],
+          "bsonType": "string",
+          "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
+        }
+      }
+    },
+    "bsonType": "object"
+  },
+  "key_vault_data": [
+    {
+      "status": 1,
+      "_id": {
+        "$binary": {
+          "base64": "AAAAAAAAAAAAAAAAAAAAAA==",
+          "subType": "04"
+        }
+      },
+      "masterKey": {
+        "provider": "aws",
+        "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+        "region": "us-east-1"
+      },
+      "updateDate": {
+        "$date": {
+          "$numberLong": "1552949630483"
+        }
+      },
+      "keyMaterial": {
+        "$binary": {
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] + } \ No newline at end of file diff --git a/testdata/client-side-encryption/legacy/timeoutMS.yml b/testdata/client-side-encryption/legacy/timeoutMS.yml new file mode 100644 index 0000000000..bb71d67650 --- /dev/null +++ b/testdata/client-side-encryption/legacy/timeoutMS.yml @@ -0,0 +1,67 @@ +runOn: + - minServerVersion: "4.4" +database_name: &database_name "cse-timeouts-db" +collection_name: &collection_name "cse-timeouts-coll" + +data: [] +json_schema: {'properties': {'encrypted_w_altname': {'encrypt': {'keyId': '/altname', 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Random'}}, 'encrypted_string': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'}}, 'random': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Random'}}, 'encrypted_string_equivalent': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'}}}, 'bsonType': 'object'} +key_vault_data: [{'status': 1, '_id': {'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}, 'masterKey': {'provider': 'aws', 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', 'region': 'us-east-1'}, 'updateDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyMaterial': {'$binary': {'base64': 
+key_vault_data: [{'status': 1, '_id': {'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}, 'masterKey': {'provider': 'aws', 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', 'region': 'us-east-1'}, 'updateDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyMaterial': {'$binary': {'base64': 'AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyAltNames': ['altname', 'another_altname']}]
+
+tests:
+  - description: "timeoutMS applied to listCollections to get collection schema"
+    failPoint:
+      configureFailPoint: failCommand
+      mode: { times: 1 }
+      data:
+        failCommands: ["listCollections"]
+        blockConnection: true
+        blockTimeMS: 60
+    clientOptions:
+      autoEncryptOpts:
+        kmsProviders:
+          aws: {} # Credentials filled in from environment.
+      timeoutMS: 50
+    operations:
+      - name: insertOne
+        arguments:
+          document: &doc0 { _id: 1, encrypted_string: "string0", random: "abc" }
+        result:
+          isTimeoutError: true
+    expectations:
+      # Auto encryption will request the collection info.
+      - command_started_event:
+          command:
+            listCollections: 1
+            filter:
+              name: *collection_name
+            maxTimeMS: { $$type: ["int", "long"] }
+          command_name: listCollections
+
+  # Test that timeoutMS applies to the sum of all operations done for client-side encryption. This is done by blocking
+  # listCollections and find for 30ms each and running an insertOne with timeoutMS=50. There should be one
+  # listCollections command and one "find" command, so the sum should take more than timeoutMS. A second listCollections
+  # event doesn't occur due to the internal MongoClient lacking configured auto encryption, plus libmongocrypt holds the
+  # collection schema in cache for a minute.
+  #
+  # This test does not include command monitoring expectations because the exact command sequence is dependent on the
+  # amount of time taken by mongocryptd communication. In slow runs, mongocryptd communication can breach the timeout
+  # and result in the final "find" not being sent.
+  - description: "remaining timeoutMS applied to find to get keyvault data"
+    failPoint:
+      configureFailPoint: failCommand
+      mode: { times: 2 }
+      data:
+        failCommands: ["listCollections", "find"]
+        blockConnection: true
+        blockTimeMS: 30
+    clientOptions:
+      autoEncryptOpts:
+        kmsProviders:
+          aws: {} # Credentials filled in from environment.
+      timeoutMS: 50
+    operations:
+      - name: insertOne
+        arguments:
+          document: *doc0
+        result:
+          isTimeoutError: true
\ No newline at end of file
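
A minimal sketch (not part of the patch) of how a test could use the revised per-client fail point API in mtest: a fail point is configured through a separately connected client, registered with the new two-argument TrackFailPoint, and later disabled and disconnected by ClearFailPoints via that same client. The URI and fail point settings below are illustrative placeholders, not values from this change.

package integration

import (
	"context"
	"testing"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/integration/mtest"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func TestTargetedFailPointSketch(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().Topologies(mtest.Sharded))
	defer mt.Close()

	mt.Run("targeted fail point", func(mt *mtest.T) {
		// Connect a client targeted at a single host; the address is a placeholder.
		opts := options.Client().ApplyURI("mongodb://localhost:27017")
		client, err := mongo.Connect(context.Background(), opts)
		if err != nil {
			mt.Fatalf("Connect error: %v", err)
		}

		// Enable the fail point through the targeted client.
		fp := bson.D{
			{"configureFailPoint", "failCommand"},
			{"mode", bson.D{{"times", 1}}},
			{"data", bson.D{
				{"failCommands", bson.A{"insert"}},
				{"closeConnection", true},
			}},
		}
		if err := client.Database("admin").RunCommand(context.Background(), fp).Err(); err != nil {
			mt.Fatalf("error setting fail point: %v", err)
		}

		// Register the fail point against the client that set it. ClearFailPoints
		// disables it via this client and then disconnects the client, so the test
		// must not disconnect it itself (the defer removed in this patch).
		mt.TrackFailPoint("failCommand", client)

		// ... run operations that exercise the fail point ...
	})
}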