started mongo stuff, but it's a PITA. fuck mongo

This commit is contained in:
Pablo Martin 2025-06-02 17:24:23 +02:00
parent 4cd36ea3fc
commit 0f1bc5dff3
937 changed files with 205043 additions and 0 deletions


@@ -0,0 +1,172 @@
import { type Document, resolveBSONOptions } from './bson';
import type { Db } from './db';
import type { CommandOperationOptions } from './operations/command';
import { executeOperation } from './operations/execute_operation';
import {
ListDatabasesOperation,
type ListDatabasesOptions,
type ListDatabasesResult
} from './operations/list_databases';
import { RemoveUserOperation, type RemoveUserOptions } from './operations/remove_user';
import { RunAdminCommandOperation, type RunCommandOptions } from './operations/run_command';
import {
ValidateCollectionOperation,
type ValidateCollectionOptions
} from './operations/validate_collection';
/** @internal */
export interface AdminPrivate {
db: Db;
}
/**
* The **Admin** class is an internal class that allows convenient access to
* the admin functionality and commands for MongoDB.
*
* **The Admin class cannot be instantiated directly.**
* @public
*
* @example
* ```ts
* import { MongoClient } from 'mongodb';
*
* const client = new MongoClient('mongodb://localhost:27017');
* const admin = client.db().admin();
* const dbInfo = await admin.listDatabases();
* for (const db of dbInfo.databases) {
* console.log(db.name);
* }
* ```
*/
export class Admin {
/** @internal */
s: AdminPrivate;
/**
* Create a new Admin instance
* @internal
*/
constructor(db: Db) {
this.s = { db };
}
/**
* Execute a command
*
* The driver will ensure the following fields are attached to the command sent to the server:
* - `lsid` - sourced from an implicit session or options.session
* - `$readPreference` - defaults to primary or can be configured by options.readPreference
* - `$db` - sourced from the name of this database
*
* If the client has a serverApi setting:
* - `apiVersion`
* - `apiStrict`
* - `apiDeprecationErrors`
*
* When in a transaction:
* - `readConcern` - sourced from readConcern set on the TransactionOptions
* - `writeConcern` - sourced from writeConcern set on the TransactionOptions
*
* Attaching any of the above fields to the command will have no effect as the driver will overwrite the value.
*
* @param command - The command to execute
* @param options - Optional settings for the command
*/
async command(command: Document, options?: RunCommandOptions): Promise<Document> {
return await executeOperation(
this.s.db.client,
new RunAdminCommandOperation(command, {
...resolveBSONOptions(options),
session: options?.session,
readPreference: options?.readPreference,
timeoutMS: options?.timeoutMS ?? this.s.db.timeoutMS
})
);
}
/**
* Retrieve the server build information
*
* @param options - Optional settings for the command
*/
async buildInfo(options?: CommandOperationOptions): Promise<Document> {
return await this.command({ buildinfo: 1 }, options);
}
/**
* Retrieve the server build information
*
* @param options - Optional settings for the command
*/
async serverInfo(options?: CommandOperationOptions): Promise<Document> {
return await this.command({ buildinfo: 1 }, options);
}
/**
* Retrieve this db's server status.
*
* @param options - Optional settings for the command
*/
async serverStatus(options?: CommandOperationOptions): Promise<Document> {
return await this.command({ serverStatus: 1 }, options);
}
/**
* Ping the MongoDB server and retrieve results
*
* @param options - Optional settings for the command
*/
async ping(options?: CommandOperationOptions): Promise<Document> {
return await this.command({ ping: 1 }, options);
}
/**
* Remove a user from a database
*
* @param username - The username to remove
* @param options - Optional settings for the command
*/
async removeUser(username: string, options?: RemoveUserOptions): Promise<boolean> {
return await executeOperation(
this.s.db.client,
new RemoveUserOperation(this.s.db, username, { dbName: 'admin', ...options })
);
}
/**
* Validate an existing collection
*
* @param collectionName - The name of the collection to validate.
* @param options - Optional settings for the command
*/
async validateCollection(
collectionName: string,
options: ValidateCollectionOptions = {}
): Promise<Document> {
return await executeOperation(
this.s.db.client,
new ValidateCollectionOperation(this, collectionName, options)
);
}
/**
* List the available databases
*
* @param options - Optional settings for the command
*/
async listDatabases(options?: ListDatabasesOptions): Promise<ListDatabasesResult> {
return await executeOperation(
this.s.db.client,
new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options })
);
}
/**
* Get ReplicaSet status
*
* @param options - Optional settings for the command
*/
async replSetGetStatus(options?: CommandOperationOptions): Promise<Document> {
return await this.command({ replSetGetStatus: 1 }, options);
}
}
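For reference, a minimal usage sketch of the Admin helpers defined above (assuming a reachable deployment at `mongodb://localhost:27017`, like the class-level example):

```ts
import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017');
const admin = client.db().admin();

// Run an arbitrary admin command; the driver attaches lsid, $readPreference and $db itself.
const pingResult = await admin.command({ ping: 1 });

// The convenience wrappers are thin aliases over command():
const status = await admin.serverStatus(); // { serverStatus: 1 }
const build = await admin.buildInfo();     // { buildinfo: 1 }
console.log(pingResult.ok, status.uptime, build.version);

await client.close();
```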


@@ -0,0 +1,22 @@
import { type Document } from './bson';
export * from './index';
/**
* @internal
*
* Since we don't bundle tslib helpers, we need to polyfill this method.
*
* This is used in the generated JS. Adapted from https://github.com/microsoft/TypeScript/blob/aafdfe5b3f76f5c41abeec412ce73c86da94c75f/src/compiler/factory/emitHelpers.ts#L1202.
*/
function __exportStar(mod: Document) {
for (const key of Object.keys(mod)) {
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return mod[key];
}
});
}
}


@@ -0,0 +1,148 @@
/* eslint-disable no-restricted-imports */
import { BSON, type DeserializeOptions, type SerializeOptions } from 'bson';
export {
Binary,
BSON,
BSONError,
BSONRegExp,
BSONSymbol,
BSONType,
calculateObjectSize,
Code,
DBRef,
Decimal128,
deserialize,
type DeserializeOptions,
Document,
Double,
EJSON,
EJSONOptions,
Int32,
Long,
MaxKey,
MinKey,
ObjectId,
type ObjectIdLike,
serialize,
Timestamp,
UUID
} from 'bson';
/** @internal */
export type BSONElement = BSON.OnDemand['BSONElement'];
export function parseToElementsToArray(bytes: Uint8Array, offset?: number): BSONElement[] {
const res = BSON.onDemand.parseToElements(bytes, offset);
return Array.isArray(res) ? res : [...res];
}
export const getInt32LE = BSON.onDemand.NumberUtils.getInt32LE;
export const getFloat64LE = BSON.onDemand.NumberUtils.getFloat64LE;
export const getBigInt64LE = BSON.onDemand.NumberUtils.getBigInt64LE;
export const toUTF8 = BSON.onDemand.ByteUtils.toUTF8;
/**
* BSON Serialization options.
* @public
*/
export interface BSONSerializeOptions
extends Omit<SerializeOptions, 'index'>,
Omit<
DeserializeOptions,
| 'evalFunctions'
| 'cacheFunctions'
| 'cacheFunctionsCrc32'
| 'allowObjectSmallerThanBufferSize'
| 'index'
| 'validation'
> {
/**
* Enabling the raw option will return a [Node.js Buffer](https://nodejs.org/api/buffer.html)
* which is allocated using [allocUnsafe API](https://nodejs.org/api/buffer.html#static-method-bufferallocunsafesize).
* See this section from the [Node.js Docs here](https://nodejs.org/api/buffer.html#what-makes-bufferallocunsafe-and-bufferallocunsafeslow-unsafe)
* for more detail about what "unsafe" refers to in this context.
* If you need to maintain your own editable clone of the bytes returned for an extended life time of the process, it is recommended you allocate
* your own buffer and clone the contents:
*
* @example
* ```ts
* const raw = await collection.findOne({}, { raw: true });
* const myBuffer = Buffer.alloc(raw.byteLength);
* myBuffer.set(raw, 0);
* // Only save and use `myBuffer` beyond this point
* ```
*
* @remarks
* Please note there is a known limitation where this option cannot be used at the MongoClient level (see [NODE-3946](https://jira.mongodb.org/browse/NODE-3946)).
* It does correctly work at `Db`, `Collection`, and per operation the same as other BSON options work.
*/
raw?: boolean;
/** Enable utf8 validation when deserializing BSON documents. Defaults to true. */
enableUtf8Validation?: boolean;
}
export function pluckBSONSerializeOptions(options: BSONSerializeOptions): BSONSerializeOptions {
const {
fieldsAsRaw,
useBigInt64,
promoteValues,
promoteBuffers,
promoteLongs,
serializeFunctions,
ignoreUndefined,
bsonRegExp,
raw,
enableUtf8Validation
} = options;
return {
fieldsAsRaw,
useBigInt64,
promoteValues,
promoteBuffers,
promoteLongs,
serializeFunctions,
ignoreUndefined,
bsonRegExp,
raw,
enableUtf8Validation
};
}
/**
* Merge the given BSONSerializeOptions, preferring options over the parent's options, and
* substituting defaults for values not set.
*
* @internal
*/
export function resolveBSONOptions(
options?: BSONSerializeOptions,
parent?: { bsonOptions?: BSONSerializeOptions }
): BSONSerializeOptions {
const parentOptions = parent?.bsonOptions;
return {
raw: options?.raw ?? parentOptions?.raw ?? false,
useBigInt64: options?.useBigInt64 ?? parentOptions?.useBigInt64 ?? false,
promoteLongs: options?.promoteLongs ?? parentOptions?.promoteLongs ?? true,
promoteValues: options?.promoteValues ?? parentOptions?.promoteValues ?? true,
promoteBuffers: options?.promoteBuffers ?? parentOptions?.promoteBuffers ?? false,
ignoreUndefined: options?.ignoreUndefined ?? parentOptions?.ignoreUndefined ?? false,
bsonRegExp: options?.bsonRegExp ?? parentOptions?.bsonRegExp ?? false,
serializeFunctions: options?.serializeFunctions ?? parentOptions?.serializeFunctions ?? false,
fieldsAsRaw: options?.fieldsAsRaw ?? parentOptions?.fieldsAsRaw ?? {},
enableUtf8Validation:
options?.enableUtf8Validation ?? parentOptions?.enableUtf8Validation ?? true
};
}
/** @internal */
export function parseUtf8ValidationOption(options?: { enableUtf8Validation?: boolean }): {
utf8: { writeErrors: false } | false;
} {
const enableUtf8Validation = options?.enableUtf8Validation;
if (enableUtf8Validation === false) {
return { utf8: false };
}
return { utf8: { writeErrors: false } };
}
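A short sketch of the merge semantics of `resolveBSONOptions` above: per-call options win over the parent's `bsonOptions`, and the documented defaults fill whatever is left unset.

```ts
const parent = { bsonOptions: { ignoreUndefined: true, promoteLongs: false } };
const resolved = resolveBSONOptions({ promoteLongs: true }, parent);

// resolved.promoteLongs === true          (explicit per-call option wins)
// resolved.ignoreUndefined === true       (inherited from the parent)
// resolved.raw === false                  (default)
// resolved.enableUtf8Validation === true  (default)
```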

File diff suppressed because it is too large.


@@ -0,0 +1,83 @@
import type { Document } from '../bson';
import * as BSON from '../bson';
import type { Collection } from '../collection';
import { MongoInvalidArgumentError } from '../error';
import type { DeleteStatement } from '../operations/delete';
import type { UpdateStatement } from '../operations/update';
import { Batch, BatchType, BulkOperationBase, type BulkWriteOptions } from './common';
/** @public */
export class OrderedBulkOperation extends BulkOperationBase {
/** @internal */
constructor(collection: Collection, options: BulkWriteOptions) {
super(collection, options, true);
}
addToOperationsList(
batchType: BatchType,
document: Document | UpdateStatement | DeleteStatement
): this {
// Get the bsonSize
const bsonSize = BSON.calculateObjectSize(document, {
checkKeys: false,
// Since we don't know what the user selected for BSON options here,
// err on the safe side, and check the size with ignoreUndefined: false.
ignoreUndefined: false
} as any);
// Throw error if the doc is bigger than the max BSON size
if (bsonSize >= this.s.maxBsonObjectSize)
// TODO(NODE-3483): Change this to MongoBSONError
throw new MongoInvalidArgumentError(
`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`
);
// Create a new batch object if we don't have a current one
if (this.s.currentBatch == null) {
this.s.currentBatch = new Batch(batchType, this.s.currentIndex);
}
const maxKeySize = this.s.maxKeySize;
// Check if we need to create a new batch
if (
// New batch if we exceed the max batch op size
this.s.currentBatchSize + 1 >= this.s.maxWriteBatchSize ||
// New batch if we exceed the maxBatchSizeBytes. Only matters if batch already has a doc,
// since we can't send an empty batch
(this.s.currentBatchSize > 0 &&
this.s.currentBatchSizeBytes + maxKeySize + bsonSize >= this.s.maxBatchSizeBytes) ||
// New batch if the new op does not have the same op type as the current batch
this.s.currentBatch.batchType !== batchType
) {
// Save the batch to the execution stack
this.s.batches.push(this.s.currentBatch);
// Create a new batch
this.s.currentBatch = new Batch(batchType, this.s.currentIndex);
// Reset the current size trackers
this.s.currentBatchSize = 0;
this.s.currentBatchSizeBytes = 0;
}
if (batchType === BatchType.INSERT) {
this.s.bulkResult.insertedIds.push({
index: this.s.currentIndex,
_id: (document as Document)._id
});
}
// We have an array of documents
if (Array.isArray(document)) {
throw new MongoInvalidArgumentError('Operation passed in cannot be an Array');
}
this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
this.s.currentBatch.operations.push(document);
this.s.currentBatchSize += 1;
this.s.currentBatchSizeBytes += maxKeySize + bsonSize;
this.s.currentIndex += 1;
return this;
}
}
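A hedged usage sketch (assuming `collection` is a connected `Collection`): ordered bulks are normally obtained via `initializeOrderedBulkOp()`, and per the batching logic above a change of op type, or hitting `maxWriteBatchSize` / `maxBatchSizeBytes`, starts a new batch; batches then execute sequentially.

```ts
const bulk = collection.initializeOrderedBulkOp();
bulk.insert({ _id: 1 });
bulk.insert({ _id: 2 });                              // joins the first insert batch
bulk.find({ _id: 1 }).updateOne({ $set: { x: 1 } });  // op type changes -> new batch
bulk.insert({ _id: 3 });                              // changes back -> another new batch
const result = await bulk.execute();                  // stops at the first error
```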


@@ -0,0 +1,115 @@
import type { Document } from '../bson';
import * as BSON from '../bson';
import type { Collection } from '../collection';
import { MongoInvalidArgumentError } from '../error';
import type { DeleteStatement } from '../operations/delete';
import type { UpdateStatement } from '../operations/update';
import {
Batch,
BatchType,
BulkOperationBase,
type BulkWriteOptions,
type BulkWriteResult
} from './common';
/** @public */
export class UnorderedBulkOperation extends BulkOperationBase {
/** @internal */
constructor(collection: Collection, options: BulkWriteOptions) {
super(collection, options, false);
}
override handleWriteError(writeResult: BulkWriteResult): void {
if (this.s.batches.length) {
return;
}
return super.handleWriteError(writeResult);
}
addToOperationsList(
batchType: BatchType,
document: Document | UpdateStatement | DeleteStatement
): this {
// Get the bsonSize
const bsonSize = BSON.calculateObjectSize(document, {
checkKeys: false,
// Since we don't know what the user selected for BSON options here,
// err on the safe side, and check the size with ignoreUndefined: false.
ignoreUndefined: false
} as any);
// Throw error if the doc is bigger than the max BSON size
if (bsonSize >= this.s.maxBsonObjectSize) {
// TODO(NODE-3483): Change this to MongoBSONError
throw new MongoInvalidArgumentError(
`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`
);
}
// Holds the current batch
this.s.currentBatch = undefined;
// Get the right type of batch
if (batchType === BatchType.INSERT) {
this.s.currentBatch = this.s.currentInsertBatch;
} else if (batchType === BatchType.UPDATE) {
this.s.currentBatch = this.s.currentUpdateBatch;
} else if (batchType === BatchType.DELETE) {
this.s.currentBatch = this.s.currentRemoveBatch;
}
const maxKeySize = this.s.maxKeySize;
// Create a new batch object if we don't have a current one
if (this.s.currentBatch == null) {
this.s.currentBatch = new Batch(batchType, this.s.currentIndex);
}
// Check if we need to create a new batch
if (
// New batch if we exceed the max batch op size
this.s.currentBatch.size + 1 >= this.s.maxWriteBatchSize ||
// New batch if we exceed the maxBatchSizeBytes. Only matters if batch already has a doc,
// since we can't send an empty batch
(this.s.currentBatch.size > 0 &&
this.s.currentBatch.sizeBytes + maxKeySize + bsonSize >= this.s.maxBatchSizeBytes) ||
// New batch if the new op does not have the same op type as the current batch
this.s.currentBatch.batchType !== batchType
) {
// Save the batch to the execution stack
this.s.batches.push(this.s.currentBatch);
// Create a new batch
this.s.currentBatch = new Batch(batchType, this.s.currentIndex);
}
// We have an array of documents
if (Array.isArray(document)) {
throw new MongoInvalidArgumentError('Operation passed in cannot be an Array');
}
this.s.currentBatch.operations.push(document);
this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
this.s.currentIndex = this.s.currentIndex + 1;
// Save back the current Batch to the right type
if (batchType === BatchType.INSERT) {
this.s.currentInsertBatch = this.s.currentBatch;
this.s.bulkResult.insertedIds.push({
index: this.s.bulkResult.insertedIds.length,
_id: (document as Document)._id
});
} else if (batchType === BatchType.UPDATE) {
this.s.currentUpdateBatch = this.s.currentBatch;
} else if (batchType === BatchType.DELETE) {
this.s.currentRemoveBatch = this.s.currentBatch;
}
// Update current batch size
this.s.currentBatch.size += 1;
this.s.currentBatch.sizeBytes += maxKeySize + bsonSize;
return this;
}
}
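By contrast, the unordered variant keeps separate in-progress insert, update and delete batches, so ops of the same type coalesce no matter the order they were added; a sketch under the same assumptions as the ordered example:

```ts
const bulk = collection.initializeUnorderedBulkOp();
bulk.insert({ _id: 1 });
bulk.find({ _id: 1 }).updateOne({ $set: { x: 1 } });
bulk.insert({ _id: 2 });             // re-joins the existing insert batch
const result = await bulk.execute(); // batches may be sent in any order
```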

File diff suppressed because it is too large.


@@ -0,0 +1,468 @@
import {
type MongoCrypt,
type MongoCryptConstructor,
type MongoCryptOptions
} from 'mongodb-client-encryption';
import * as net from 'net';
import { deserialize, type Document, serialize } from '../bson';
import { type CommandOptions, type ProxyOptions } from '../cmap/connection';
import { kDecorateResult } from '../constants';
import { getMongoDBClientEncryption } from '../deps';
import { MongoRuntimeError } from '../error';
import { MongoClient, type MongoClientOptions } from '../mongo_client';
import { type Abortable } from '../mongo_types';
import { MongoDBCollectionNamespace } from '../utils';
import { autoSelectSocketOptions } from './client_encryption';
import * as cryptoCallbacks from './crypto_callbacks';
import { MongoCryptInvalidArgumentError } from './errors';
import { MongocryptdManager } from './mongocryptd_manager';
import {
type CredentialProviders,
isEmptyCredentials,
type KMSProviders,
refreshKMSCredentials
} from './providers';
import { type CSFLEKMSTlsOptions, StateMachine } from './state_machine';
/** @public */
export interface AutoEncryptionOptions {
/** @internal client for metadata lookups */
metadataClient?: MongoClient;
/** A `MongoClient` used to fetch keys from a key vault */
keyVaultClient?: MongoClient;
/** The namespace where keys are stored in the key vault */
keyVaultNamespace?: string;
/** Configuration options that are used by specific KMS providers during key generation, encryption, and decryption. */
kmsProviders?: KMSProviders;
/** Configuration options for custom credential providers. */
credentialProviders?: CredentialProviders;
/**
* A map of namespaces to a local JSON schema for encryption
*
* **NOTE**: Supplying options.schemaMap provides more security than relying on JSON Schemas obtained from the server.
* It protects against a malicious server advertising a false JSON Schema, which could trick the client into sending decrypted data that should be encrypted.
* Schemas supplied in the schemaMap only apply to configuring automatic encryption for Client-Side Field Level Encryption.
* Other validation rules in the JSON schema will not be enforced by the driver and will result in an error.
*/
schemaMap?: Document;
/** Supply a schema for the encrypted fields in the document */
encryptedFieldsMap?: Document;
/** Allows the user to bypass auto encryption, maintaining implicit decryption */
bypassAutoEncryption?: boolean;
/** Allows users to bypass query analysis */
bypassQueryAnalysis?: boolean;
options?: {
/** An optional hook to catch logging messages from the underlying encryption engine */
logger?: (level: AutoEncryptionLoggerLevel, message: string) => void;
};
extraOptions?: {
/**
* A local process the driver communicates with to determine how to encrypt values in a command.
* Defaults to "mongodb://%2Fvar%2Fmongocryptd.sock" if domain sockets are available or "mongodb://localhost:27020" otherwise
*/
mongocryptdURI?: string;
/** If true, autoEncryption will not attempt to spawn a mongocryptd before connecting */
mongocryptdBypassSpawn?: boolean;
/** The path to the mongocryptd executable on the system */
mongocryptdSpawnPath?: string;
/** Command line arguments to use when auto-spawning a mongocryptd */
mongocryptdSpawnArgs?: string[];
/**
* Full path to a MongoDB Crypt shared library to be used (instead of mongocryptd).
*
* This needs to be the path to the file itself, not a directory.
* It can be an absolute or relative path. If the path is relative and
* its first component is `$ORIGIN`, it will be replaced by the directory
* containing the mongodb-client-encryption native addon file. Otherwise,
* the path will be interpreted relative to the current working directory.
*
* Currently, loading different MongoDB Crypt shared library files from different
* MongoClients in the same process is not supported.
*
* If this option is provided and no MongoDB Crypt shared library could be loaded
* from the specified location, creating the MongoClient will fail.
*
* If this option is not provided and `cryptSharedLibRequired` is not specified,
* the AutoEncrypter will attempt to spawn and/or use mongocryptd according
* to the mongocryptd-specific `extraOptions` options.
*
* Specifying a path prevents mongocryptd from being used as a fallback.
*
* Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
*/
cryptSharedLibPath?: string;
/**
* If specified, never use mongocryptd and instead fail when the MongoDB Crypt
* shared library could not be loaded.
*
* This is always true when `cryptSharedLibPath` is specified.
*
* Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
*/
cryptSharedLibRequired?: boolean;
/**
* Search paths for a MongoDB Crypt shared library to be used (instead of mongocryptd)
* Only for driver testing!
* @internal
*/
cryptSharedLibSearchPaths?: string[];
};
proxyOptions?: ProxyOptions;
/** The TLS options to use connecting to the KMS provider */
tlsOptions?: CSFLEKMSTlsOptions;
}
/**
* @public
*
* Extra options related to the mongocryptd process
* \* _Available in MongoDB 6.0 or higher._
*/
export type AutoEncryptionExtraOptions = NonNullable<AutoEncryptionOptions['extraOptions']>;
/** @public */
export const AutoEncryptionLoggerLevel = Object.freeze({
FatalError: 0,
Error: 1,
Warning: 2,
Info: 3,
Trace: 4
} as const);
/**
* @public
* The level of severity of the log message
*
* | Value | Level |
* |-------|-------|
* | 0 | Fatal Error |
* | 1 | Error |
* | 2 | Warning |
* | 3 | Info |
* | 4 | Trace |
*/
export type AutoEncryptionLoggerLevel =
(typeof AutoEncryptionLoggerLevel)[keyof typeof AutoEncryptionLoggerLevel];
/**
* @internal An internal class to be used by the driver for auto encryption
* **NOTE**: Not meant to be instantiated directly, this is for internal use only.
*/
export class AutoEncrypter {
_client: MongoClient;
_bypassEncryption: boolean;
_keyVaultNamespace: string;
_keyVaultClient: MongoClient;
_metaDataClient: MongoClient;
_proxyOptions: ProxyOptions;
_tlsOptions: CSFLEKMSTlsOptions;
_kmsProviders: KMSProviders;
_bypassMongocryptdAndCryptShared: boolean;
_contextCounter: number;
_credentialProviders?: CredentialProviders;
_mongocryptdManager?: MongocryptdManager;
_mongocryptdClient?: MongoClient;
/** @internal */
_mongocrypt: MongoCrypt;
/**
* Used by devtools to enable decorating decryption results.
*
* When set and enabled, `decrypt` will automatically recursively
* traverse a decrypted document and if a field has been decrypted,
* it will mark it as decrypted. Compass uses this to determine which
* fields were decrypted.
*/
[kDecorateResult] = false;
/** @internal */
static getMongoCrypt(): MongoCryptConstructor {
const encryption = getMongoDBClientEncryption();
if ('kModuleError' in encryption) {
throw encryption.kModuleError;
}
return encryption.MongoCrypt;
}
/**
* Create an AutoEncrypter
*
* **Note**: Do not instantiate this class directly. Rather, supply the relevant options to a MongoClient
*
* **Note**: Supplying `options.schemaMap` provides more security than relying on JSON Schemas obtained from the server.
* It protects against a malicious server advertising a false JSON Schema, which could trick the client into sending unencrypted data that should be encrypted.
* Schemas supplied in the schemaMap only apply to configuring automatic encryption for Client-Side Field Level Encryption.
* Other validation rules in the JSON schema will not be enforced by the driver and will result in an error.
*
* @example <caption>Create an AutoEncrypter that makes use of mongocryptd</caption>
* ```ts
* // Enabling autoEncryption via a MongoClient using mongocryptd
* const { MongoClient } = require('mongodb');
* const client = new MongoClient(URL, {
* autoEncryption: {
* kmsProviders: {
* aws: {
* accessKeyId: AWS_ACCESS_KEY,
* secretAccessKey: AWS_SECRET_KEY
* }
* }
* }
* });
* ```
*
* await client.connect();
* // From here on, the client will be encrypting / decrypting automatically
* @example <caption>Create an AutoEncrypter that makes use of libmongocrypt's CSFLE shared library</caption>
* ```ts
* // Enabling autoEncryption via a MongoClient using CSFLE shared library
* const { MongoClient } = require('mongodb');
* const client = new MongoClient(URL, {
* autoEncryption: {
* kmsProviders: {
* aws: {}
* },
* extraOptions: {
* cryptSharedLibPath: '/path/to/local/crypt/shared/lib',
* cryptSharedLibRequired: true
* }
* }
* });
* ```
*
* await client.connect();
* // From here on, the client will be encrypting / decrypting automatically
*/
constructor(client: MongoClient, options: AutoEncryptionOptions) {
this._client = client;
this._bypassEncryption = options.bypassAutoEncryption === true;
this._keyVaultNamespace = options.keyVaultNamespace || 'admin.datakeys';
this._keyVaultClient = options.keyVaultClient || client;
this._metaDataClient = options.metadataClient || client;
this._proxyOptions = options.proxyOptions || {};
this._tlsOptions = options.tlsOptions || {};
this._kmsProviders = options.kmsProviders || {};
this._credentialProviders = options.credentialProviders;
if (options.credentialProviders?.aws && !isEmptyCredentials('aws', this._kmsProviders)) {
throw new MongoCryptInvalidArgumentError(
'Can only provide a custom AWS credential provider when the state machine is configured for automatic AWS credential fetching'
);
}
const mongoCryptOptions: MongoCryptOptions = {
enableMultipleCollinfo: true,
cryptoCallbacks
};
if (options.schemaMap) {
mongoCryptOptions.schemaMap = Buffer.isBuffer(options.schemaMap)
? options.schemaMap
: (serialize(options.schemaMap) as Buffer);
}
if (options.encryptedFieldsMap) {
mongoCryptOptions.encryptedFieldsMap = Buffer.isBuffer(options.encryptedFieldsMap)
? options.encryptedFieldsMap
: (serialize(options.encryptedFieldsMap) as Buffer);
}
mongoCryptOptions.kmsProviders = !Buffer.isBuffer(this._kmsProviders)
? (serialize(this._kmsProviders) as Buffer)
: this._kmsProviders;
if (options.options?.logger) {
mongoCryptOptions.logger = options.options.logger;
}
if (options.extraOptions && options.extraOptions.cryptSharedLibPath) {
mongoCryptOptions.cryptSharedLibPath = options.extraOptions.cryptSharedLibPath;
}
if (options.bypassQueryAnalysis) {
mongoCryptOptions.bypassQueryAnalysis = options.bypassQueryAnalysis;
}
this._bypassMongocryptdAndCryptShared = this._bypassEncryption || !!options.bypassQueryAnalysis;
if (options.extraOptions && options.extraOptions.cryptSharedLibSearchPaths) {
// Only for driver testing
mongoCryptOptions.cryptSharedLibSearchPaths = options.extraOptions.cryptSharedLibSearchPaths;
} else if (!this._bypassMongocryptdAndCryptShared) {
mongoCryptOptions.cryptSharedLibSearchPaths = ['$SYSTEM'];
}
const MongoCrypt = AutoEncrypter.getMongoCrypt();
this._mongocrypt = new MongoCrypt(mongoCryptOptions);
this._contextCounter = 0;
if (
options.extraOptions &&
options.extraOptions.cryptSharedLibRequired &&
!this.cryptSharedLibVersionInfo
) {
throw new MongoCryptInvalidArgumentError(
'`cryptSharedLibRequired` set but no crypt_shared library loaded'
);
}
// Only instantiate mongocryptd manager/client once we know for sure
// that we are not using the CSFLE shared library.
if (!this._bypassMongocryptdAndCryptShared && !this.cryptSharedLibVersionInfo) {
this._mongocryptdManager = new MongocryptdManager(options.extraOptions);
const clientOptions: MongoClientOptions = {
serverSelectionTimeoutMS: 10000
};
if (
(options.extraOptions == null || typeof options.extraOptions.mongocryptdURI !== 'string') &&
!net.getDefaultAutoSelectFamily
) {
// Only set family if autoSelectFamily options are not supported.
clientOptions.family = 4;
}
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore: TS complains as this always returns true on versions where it is present.
if (net.getDefaultAutoSelectFamily) {
// The AutoEncrypter is created inside the MongoClient constructor while options are still being parsed,
// so we do not have access to the in-progress options.
// TODO(NODE-6449): AutoEncrypter does not use client options for autoSelectFamily
Object.assign(clientOptions, autoSelectSocketOptions(this._client.s?.options ?? {}));
}
this._mongocryptdClient = new MongoClient(this._mongocryptdManager.uri, clientOptions);
}
}
/**
* Initializes the auto encrypter by spawning a mongocryptd and connecting to it.
*
* This function is a no-op when bypassSpawn is set or the crypt shared library is used.
*/
async init(): Promise<MongoClient | void> {
if (this._bypassMongocryptdAndCryptShared || this.cryptSharedLibVersionInfo) {
return;
}
if (!this._mongocryptdManager) {
throw new MongoRuntimeError(
'Reached impossible state: mongocryptdManager is undefined when neither bypassSpawn nor the shared lib are specified.'
);
}
if (!this._mongocryptdClient) {
throw new MongoRuntimeError(
'Reached impossible state: mongocryptdClient is undefined when neither bypassSpawn nor the shared lib are specified.'
);
}
if (!this._mongocryptdManager.bypassSpawn) {
await this._mongocryptdManager.spawn();
}
try {
const client = await this._mongocryptdClient.connect();
return client;
} catch (error) {
throw new MongoRuntimeError(
'Unable to connect to `mongocryptd`, please make sure it is running or in your PATH for auto-spawn',
{ cause: error }
);
}
}
/**
* Cleans up the `_mongocryptdClient`, if present.
*/
async teardown(force: boolean): Promise<void> {
await this._mongocryptdClient?.close(force);
}
/**
* Encrypt a command for a given namespace.
*/
async encrypt(
ns: string,
cmd: Document,
options: CommandOptions & Abortable = {}
): Promise<Document | Uint8Array> {
options.signal?.throwIfAborted();
if (this._bypassEncryption) {
// If `bypassAutoEncryption` has been specified, don't encrypt
return cmd;
}
const commandBuffer = Buffer.isBuffer(cmd) ? cmd : serialize(cmd, options);
const context = this._mongocrypt.makeEncryptionContext(
MongoDBCollectionNamespace.fromString(ns).db,
commandBuffer
);
context.id = this._contextCounter++;
context.ns = ns;
context.document = cmd;
const stateMachine = new StateMachine({
promoteValues: false,
promoteLongs: false,
proxyOptions: this._proxyOptions,
tlsOptions: this._tlsOptions,
socketOptions: autoSelectSocketOptions(this._client.s.options)
});
return deserialize(await stateMachine.execute(this, context, options), {
promoteValues: false,
promoteLongs: false
});
}
/**
* Decrypt a command response
*/
async decrypt(
response: Uint8Array,
options: CommandOptions & Abortable = {}
): Promise<Uint8Array> {
options.signal?.throwIfAborted();
const context = this._mongocrypt.makeDecryptionContext(response);
context.id = this._contextCounter++;
const stateMachine = new StateMachine({
...options,
proxyOptions: this._proxyOptions,
tlsOptions: this._tlsOptions,
socketOptions: autoSelectSocketOptions(this._client.s.options)
});
return await stateMachine.execute(this, context, options);
}
/**
* Ask the user for KMS credentials.
*
* This returns anything that looks like the kmsProviders original input
* option. It can be empty, and any provider specified here will override
* the original ones.
*/
async askForKMSCredentials(): Promise<KMSProviders> {
return await refreshKMSCredentials(this._kmsProviders, this._credentialProviders);
}
/**
* Return the current libmongocrypt's CSFLE shared library version
* as `{ version: bigint, versionStr: string }`, or `null` if no CSFLE
* shared library was loaded.
*/
get cryptSharedLibVersionInfo(): { version: bigint; versionStr: string } | null {
return this._mongocrypt.cryptSharedLibVersionInfo;
}
static get libmongocryptVersion(): string {
return AutoEncrypter.getMongoCrypt().libmongocryptVersion;
}
}
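A hedged configuration sketch for the `AutoEncryptionOptions` shape declared above, using a local KMS master key (the 96-byte key requirement comes from the providers module later in this commit); the namespace and URI values are illustrative only.

```ts
import * as crypto from 'crypto';
import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017', {
  autoEncryption: {
    keyVaultNamespace: 'encryption.__keyVault',
    kmsProviders: {
      local: { key: crypto.randomBytes(96) } // 96-byte local master key
    },
    // schemaMap: { 'mydb.people': { /* JSON schema with encrypt metadata */ } },
    extraOptions: {
      cryptSharedLibRequired: false // fall back to spawning mongocryptd if crypt_shared is absent
    }
  }
});
```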

File diff suppressed because it is too large.


@@ -0,0 +1,87 @@
import * as crypto from 'crypto';
type AES256Callback = (key: Buffer, iv: Buffer, input: Buffer, output: Buffer) => number | Error;
export function makeAES256Hook(
method: 'createCipheriv' | 'createDecipheriv',
mode: 'aes-256-cbc' | 'aes-256-ctr'
): AES256Callback {
return function (key: Buffer, iv: Buffer, input: Buffer, output: Buffer): number | Error {
let result;
try {
const cipher = crypto[method](mode, key, iv);
cipher.setAutoPadding(false);
result = cipher.update(input);
const final = cipher.final();
if (final.length > 0) {
result = Buffer.concat([result, final]);
}
} catch (e) {
return e;
}
result.copy(output);
return result.length;
};
}
export function randomHook(buffer: Buffer, count: number): number | Error {
try {
crypto.randomFillSync(buffer, 0, count);
} catch (e) {
return e;
}
return count;
}
export function sha256Hook(input: Buffer, output: Buffer): number | Error {
let result;
try {
result = crypto.createHash('sha256').update(input).digest();
} catch (e) {
return e;
}
result.copy(output);
return result.length;
}
type HMACHook = (key: Buffer, input: Buffer, output: Buffer) => number | Error;
export function makeHmacHook(algorithm: 'sha512' | 'sha256'): HMACHook {
return (key: Buffer, input: Buffer, output: Buffer): number | Error => {
let result;
try {
result = crypto.createHmac(algorithm, key).update(input).digest();
} catch (e) {
return e;
}
result.copy(output);
return result.length;
};
}
export function signRsaSha256Hook(key: Buffer, input: Buffer, output: Buffer): number | Error {
let result;
try {
const signer = crypto.createSign('sha256WithRSAEncryption');
const privateKey = Buffer.from(
`-----BEGIN PRIVATE KEY-----\n${key.toString('base64')}\n-----END PRIVATE KEY-----\n`
);
result = signer.update(input).end().sign(privateKey);
} catch (e) {
return e;
}
result.copy(output);
return result.length;
}
export const aes256CbcEncryptHook = makeAES256Hook('createCipheriv', 'aes-256-cbc');
export const aes256CbcDecryptHook = makeAES256Hook('createDecipheriv', 'aes-256-cbc');
export const aes256CtrEncryptHook = makeAES256Hook('createCipheriv', 'aes-256-ctr');
export const aes256CtrDecryptHook = makeAES256Hook('createDecipheriv', 'aes-256-ctr');
export const hmacSha512Hook = makeHmacHook('sha512');
export const hmacSha256Hook = makeHmacHook('sha256');
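These hooks are passed to libmongocrypt as the `cryptoCallbacks` seen in the AutoEncrypter constructor above; each fills the caller-supplied output buffer and returns the number of bytes written (or the thrown Error). A minimal sketch exercising the AES hooks directly, assuming they are importable from wherever this module ends up vendored:

```ts
import * as crypto from 'crypto';

const key = crypto.randomBytes(32);   // AES-256 key
const iv = crypto.randomBytes(16);
const input = crypto.randomBytes(64); // multiple of 16 bytes, since autoPadding is disabled
const output = Buffer.alloc(input.length);

const written = aes256CbcEncryptHook(key, iv, input, output);
if (written instanceof Error) throw written;

const roundTrip = Buffer.alloc(input.length);
aes256CbcDecryptHook(key, iv, output.subarray(0, written), roundTrip);
// roundTrip.equals(input) === true
```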


@@ -0,0 +1,141 @@
import { type Document } from '../bson';
import { MongoError } from '../error';
/**
* @public
* An error indicating that something went wrong specifically with MongoDB Client Encryption
*/
export class MongoCryptError extends MongoError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string, options: { cause?: Error } = {}) {
super(message, options);
}
override get name() {
return 'MongoCryptError';
}
}
/**
* @public
*
* An error indicating an invalid argument was provided to an encryption API.
*/
export class MongoCryptInvalidArgumentError extends MongoCryptError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string) {
super(message);
}
override get name() {
return 'MongoCryptInvalidArgumentError';
}
}
/**
* @public
* An error indicating that `ClientEncryption.createEncryptedCollection()` failed to create data keys
*/
export class MongoCryptCreateDataKeyError extends MongoCryptError {
encryptedFields: Document;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(encryptedFields: Document, { cause }: { cause: Error }) {
super(`Unable to complete creating data keys: ${cause.message}`, { cause });
this.encryptedFields = encryptedFields;
}
override get name() {
return 'MongoCryptCreateDataKeyError';
}
}
/**
* @public
* An error indicating that `ClientEncryption.createEncryptedCollection()` failed to create a collection
*/
export class MongoCryptCreateEncryptedCollectionError extends MongoCryptError {
encryptedFields: Document;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(encryptedFields: Document, { cause }: { cause: Error }) {
super(`Unable to create collection: ${cause.message}`, { cause });
this.encryptedFields = encryptedFields;
}
override get name() {
return 'MongoCryptCreateEncryptedCollectionError';
}
}
/**
* @public
* An error indicating that mongodb-client-encryption failed to auto-refresh Azure KMS credentials.
*/
export class MongoCryptAzureKMSRequestError extends MongoCryptError {
/** The body of the http response that failed, if present. */
body?: Document;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string, body?: Document) {
super(message);
this.body = body;
}
override get name(): string {
return 'MongoCryptAzureKMSRequestError';
}
}
/** @public */
export class MongoCryptKMSRequestNetworkTimeoutError extends MongoCryptError {
override get name(): string {
return 'MongoCryptKMSRequestNetworkTimeoutError';
}
}


@@ -0,0 +1,100 @@
import type { ChildProcess } from 'child_process';
import { MongoNetworkTimeoutError } from '../error';
import { type AutoEncryptionExtraOptions } from './auto_encrypter';
/**
* @internal
* An internal class that handles spawning a mongocryptd.
*/
export class MongocryptdManager {
static DEFAULT_MONGOCRYPTD_URI = 'mongodb://localhost:27020';
uri: string;
bypassSpawn: boolean;
spawnPath = '';
spawnArgs: Array<string> = [];
_child?: ChildProcess;
constructor(extraOptions: AutoEncryptionExtraOptions = {}) {
this.uri =
typeof extraOptions.mongocryptdURI === 'string' && extraOptions.mongocryptdURI.length > 0
? extraOptions.mongocryptdURI
: MongocryptdManager.DEFAULT_MONGOCRYPTD_URI;
this.bypassSpawn = !!extraOptions.mongocryptdBypassSpawn;
if (Object.hasOwn(extraOptions, 'mongocryptdSpawnPath') && extraOptions.mongocryptdSpawnPath) {
this.spawnPath = extraOptions.mongocryptdSpawnPath;
}
if (
Object.hasOwn(extraOptions, 'mongocryptdSpawnArgs') &&
Array.isArray(extraOptions.mongocryptdSpawnArgs)
) {
this.spawnArgs = this.spawnArgs.concat(extraOptions.mongocryptdSpawnArgs);
}
if (
this.spawnArgs
.filter(arg => typeof arg === 'string')
.every(arg => arg.indexOf('--idleShutdownTimeoutSecs') < 0)
) {
this.spawnArgs.push('--idleShutdownTimeoutSecs', '60');
}
}
/**
* Will check to see if a mongocryptd is up. If it is not up, it will attempt
* to spawn a mongocryptd in a detached process, and then wait for it to be up.
*/
async spawn(): Promise<void> {
const cmdName = this.spawnPath || 'mongocryptd';
// eslint-disable-next-line @typescript-eslint/no-require-imports
const { spawn } = require('child_process') as typeof import('child_process');
// Spawned with stdio: ignore and detached: true
// to ensure child can outlive parent.
this._child = spawn(cmdName, this.spawnArgs, {
stdio: 'ignore',
detached: true
});
this._child.on('error', () => {
// From the FLE spec:
// "The stdout and stderr of the spawned process MUST not be exposed in the driver
// (e.g. redirect to /dev/null). Users can pass the argument --logpath to
// extraOptions.mongocryptdSpawnArgs if they need to inspect mongocryptd logs.
// If spawning is necessary, the driver MUST spawn mongocryptd whenever server
// selection on the MongoClient to mongocryptd fails. If the MongoClient fails to
// connect after spawning, the server selection error is propagated to the user."
// The AutoEncrypter and MongoCryptdManager should work together to spawn
// mongocryptd whenever necessary. Additionally, the `mongocryptd` intentionally
// shuts down after 60s and gets respawned when necessary. We rely on server
// selection timeouts when connecting to the `mongocryptd` to inform users that something
// has been configured incorrectly. For those reasons, we suppress stderr from
// the `mongocryptd` process and immediately unref the process.
});
// unref child to remove handle from event loop
this._child.unref();
}
/**
* @returns the result of `fn` or rejects with an error.
*/
async withRespawn<T>(fn: () => Promise<T>): ReturnType<typeof fn> {
try {
const result = await fn();
return result;
} catch (err) {
// If we are not bypassing spawning, then we should retry once on a MongoTimeoutError (server selection error)
const shouldSpawn = err instanceof MongoNetworkTimeoutError && !this.bypassSpawn;
if (!shouldSpawn) {
throw err;
}
}
await this.spawn();
const result = await fn();
return result;
}
}
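A sketch of how `withRespawn` is intended to be used: wrap a call against mongocryptd, and if it fails with a server-selection timeout (`MongoNetworkTimeoutError`) while spawning is not bypassed, the daemon is respawned and the call retried exactly once. `mongocryptdClient` here is a hypothetical connected client.

```ts
const manager = new MongocryptdManager({ mongocryptdSpawnArgs: ['--port', '27020'] });

const reply = await manager.withRespawn(async () => {
  // hypothetical client pointed at manager.uri; any promise-returning function works
  return await mongocryptdClient.db('admin').command({ ping: 1 });
});
```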


@@ -0,0 +1,33 @@
import {
type AWSCredentialProvider,
AWSSDKCredentialProvider
} from '../../cmap/auth/aws_temporary_credentials';
import { type KMSProviders } from '.';
/**
* @internal
*/
export async function loadAWSCredentials(
kmsProviders: KMSProviders,
provider?: AWSCredentialProvider
): Promise<KMSProviders> {
const credentialProvider = new AWSSDKCredentialProvider(provider);
// We shouldn't ever receive a response from the AWS SDK that doesn't have a `SecretAccessKey`
// or `AccessKeyId`. However, TS says these fields are optional. We provide empty strings
// and let libmongocrypt error if we're unable to fetch the required keys.
const {
SecretAccessKey = '',
AccessKeyId = '',
Token
} = await credentialProvider.getCredentials();
const aws: NonNullable<KMSProviders['aws']> = {
secretAccessKey: SecretAccessKey,
accessKeyId: AccessKeyId
};
// the AWS session token is only required for temporary credentials so only attach it to the
// result if it's present in the response from the aws sdk
Token != null && (aws.sessionToken = Token);
return { ...kmsProviders, aws };
}


@@ -0,0 +1,181 @@
import { type Document } from '../../bson';
import { MongoNetworkTimeoutError } from '../../error';
import { get } from '../../utils';
import { MongoCryptAzureKMSRequestError } from '../errors';
import { type KMSProviders } from './index';
const MINIMUM_TOKEN_REFRESH_IN_MILLISECONDS = 6000;
/** Base URL for getting Azure tokens. */
export const AZURE_BASE_URL = 'http://169.254.169.254/metadata/identity/oauth2/token?';
/**
* The access token that libmongocrypt expects for Azure kms.
*/
interface AccessToken {
accessToken: string;
}
/**
* The response from the azure idms endpoint, including the `expiresOnTimestamp`.
* `expiresOnTimestamp` is needed for caching.
*/
interface AzureTokenCacheEntry extends AccessToken {
accessToken: string;
expiresOnTimestamp: number;
}
/**
* @internal
*/
export class AzureCredentialCache {
cachedToken: AzureTokenCacheEntry | null = null;
async getToken(): Promise<AccessToken> {
if (this.cachedToken == null || this.needsRefresh(this.cachedToken)) {
this.cachedToken = await this._getToken();
}
return { accessToken: this.cachedToken.accessToken };
}
needsRefresh(token: AzureTokenCacheEntry): boolean {
const timeUntilExpirationMS = token.expiresOnTimestamp - Date.now();
return timeUntilExpirationMS <= MINIMUM_TOKEN_REFRESH_IN_MILLISECONDS;
}
/**
* exposed for testing
*/
resetCache() {
this.cachedToken = null;
}
/**
* exposed for testing
*/
_getToken(): Promise<AzureTokenCacheEntry> {
return fetchAzureKMSToken();
}
}
/** @internal */
export const tokenCache = new AzureCredentialCache();
/** @internal */
async function parseResponse(response: {
body: string;
status?: number;
}): Promise<AzureTokenCacheEntry> {
const { status, body: rawBody } = response;
const body: { expires_in?: number; access_token?: string } = (() => {
try {
return JSON.parse(rawBody);
} catch {
throw new MongoCryptAzureKMSRequestError('Malformed JSON body in GET request.');
}
})();
if (status !== 200) {
throw new MongoCryptAzureKMSRequestError('Unable to complete request.', body);
}
if (!body.access_token) {
throw new MongoCryptAzureKMSRequestError(
'Malformed response body - missing field `access_token`.'
);
}
if (!body.expires_in) {
throw new MongoCryptAzureKMSRequestError(
'Malformed response body - missing field `expires_in`.'
);
}
const expiresInMS = Number(body.expires_in) * 1000;
if (Number.isNaN(expiresInMS)) {
throw new MongoCryptAzureKMSRequestError(
'Malformed response body - unable to parse int from `expires_in` field.'
);
}
return {
accessToken: body.access_token,
expiresOnTimestamp: Date.now() + expiresInMS
};
}
/**
* @internal
*
* exposed for CSFLE
* [prose test 18](https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#azure-imds-credentials)
*/
export interface AzureKMSRequestOptions {
headers?: Document;
url?: URL | string;
}
/**
* @internal
* Get the Azure endpoint URL.
*/
export function addAzureParams(url: URL, resource: string, username?: string): URL {
url.searchParams.append('api-version', '2018-02-01');
url.searchParams.append('resource', resource);
if (username) {
url.searchParams.append('client_id', username);
}
return url;
}
/**
* @internal
*
* parses any options provided by prose tests to `fetchAzureKMSToken` and merges them with
* the default values for headers and the request url.
*/
export function prepareRequest(options: AzureKMSRequestOptions): {
headers: Document;
url: URL;
} {
const url = new URL(options.url?.toString() ?? AZURE_BASE_URL);
addAzureParams(url, 'https://vault.azure.net');
const headers = { ...options.headers, 'Content-Type': 'application/json', Metadata: true };
return { headers, url };
}
/**
* @internal
*
* `AzureKMSRequestOptions` allows prose tests to modify the http request sent to the idms
* servers. This is required to simulate different server conditions. No options are expected to
* be set outside of tests.
*
* exposed for CSFLE
* [prose test 18](https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#azure-imds-credentials)
*/
export async function fetchAzureKMSToken(
options: AzureKMSRequestOptions = {}
): Promise<AzureTokenCacheEntry> {
const { headers, url } = prepareRequest(options);
try {
const response = await get(url, { headers });
return await parseResponse(response);
} catch (error) {
if (error instanceof MongoNetworkTimeoutError) {
throw new MongoCryptAzureKMSRequestError(`[Azure KMS] ${error.message}`);
}
throw error;
}
}
/**
* @internal
*
* @throws Will reject with a `MongoCryptError` if the http request fails or the http response is malformed.
*/
export async function loadAzureCredentials(kmsProviders: KMSProviders): Promise<KMSProviders> {
const azure = await tokenCache.getToken();
return { ...kmsProviders, azure };
}
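A small sketch of the caching behavior above: `_getToken` is exposed for testing, so a stub can show that a cached token is reused until it falls inside the 6-second refresh window (`MINIMUM_TOKEN_REFRESH_IN_MILLISECONDS`).

```ts
// Hypothetical test double; AzureCredentialCache comes from the file above.
class StubbedCache extends AzureCredentialCache {
  calls = 0;
  override async _getToken() {
    this.calls += 1;
    return { accessToken: `token-${this.calls}`, expiresOnTimestamp: Date.now() + 60_000 };
  }
}

const cache = new StubbedCache();
await cache.getToken(); // empty cache -> fetches              (calls === 1)
await cache.getToken(); // 60s left, well beyond 6s -> reused  (calls === 1)
cache.cachedToken!.expiresOnTimestamp = Date.now() + 1_000;
await cache.getToken(); // inside the 6s window -> refreshed   (calls === 2)
```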


@@ -0,0 +1,16 @@
import { getGcpMetadata } from '../../deps';
import { type KMSProviders } from '.';
/** @internal */
export async function loadGCPCredentials(kmsProviders: KMSProviders): Promise<KMSProviders> {
const gcpMetadata = getGcpMetadata();
if ('kModuleError' in gcpMetadata) {
return kmsProviders;
}
const { access_token: accessToken } = await gcpMetadata.instance<{ access_token: string }>({
property: 'service-accounts/default/token'
});
return { ...kmsProviders, gcp: { accessToken } };
}


@@ -0,0 +1,207 @@
import type { Binary } from '../../bson';
import { type AWSCredentialProvider } from '../../cmap/auth/aws_temporary_credentials';
import { loadAWSCredentials } from './aws';
import { loadAzureCredentials } from './azure';
import { loadGCPCredentials } from './gcp';
/**
* @public
*
* A data key provider. Allowed values:
*
* - aws, gcp, local, kmip or azure
* - (`mongodb-client-encryption>=6.0.1` only) a named key, in the form of:
* `aws:<name>`, `gcp:<name>`, `local:<name>`, `kmip:<name>`, `azure:<name>`
* where `name` is an alphanumeric string, underscores allowed.
*/
export type ClientEncryptionDataKeyProvider = keyof KMSProviders;
/** @public */
export interface AWSKMSProviderConfiguration {
/**
* The access key used for the AWS KMS provider
*/
accessKeyId: string;
/**
* The secret access key used for the AWS KMS provider
*/
secretAccessKey: string;
/**
* An optional AWS session token that will be used as the
* X-Amz-Security-Token header for AWS requests.
*/
sessionToken?: string;
}
/** @public */
export interface LocalKMSProviderConfiguration {
/**
* The master key used to encrypt/decrypt data keys.
* A 96-byte long Buffer or base64 encoded string.
*/
key: Binary | Uint8Array | string;
}
/** @public */
export interface KMIPKMSProviderConfiguration {
/**
* The output endpoint string.
* The endpoint consists of a hostname and port separated by a colon.
* E.g. "example.com:123". A port is always present.
*/
endpoint?: string;
}
/** @public */
export type AzureKMSProviderConfiguration =
| {
/**
* The tenant ID identifies the organization for the account
*/
tenantId: string;
/**
* The client ID to authenticate a registered application
*/
clientId: string;
/**
* The client secret to authenticate a registered application
*/
clientSecret: string;
/**
* If present, a host with optional port. E.g. "example.com" or "example.com:443".
* This is optional, and only needed if the customer is using a non-commercial Azure instance
* (e.g. a government or China account, which use different URLs).
* Defaults to "login.microsoftonline.com"
*/
identityPlatformEndpoint?: string | undefined;
}
| {
/**
* If present, an access token to authenticate with Azure.
*/
accessToken: string;
};
/** @public */
export type GCPKMSProviderConfiguration =
| {
/**
* The service account email to authenticate
*/
email: string;
/**
* A PKCS#8 encrypted key. This can either be a base64 string or a binary representation
*/
privateKey: string | Buffer;
/**
* If present, a host with optional port. E.g. "example.com" or "example.com:443".
* Defaults to "oauth2.googleapis.com"
*/
endpoint?: string | undefined;
}
| {
/**
* If present, an access token to authenticate with GCP.
*/
accessToken: string;
};
/**
* @public
* Configuration options for custom credential providers for KMS requests.
*/
export interface CredentialProviders {
/* A custom AWS credential provider */
aws?: AWSCredentialProvider;
}
/**
* @public
* Configuration options that are used by specific KMS providers during key generation, encryption, and decryption.
*
* Named KMS providers _are not supported_ for automatic KMS credential fetching.
*/
export interface KMSProviders {
/**
* Configuration options for using 'aws' as your KMS provider
*/
aws?: AWSKMSProviderConfiguration | Record<string, never>;
[key: `aws:${string}`]: AWSKMSProviderConfiguration;
/**
* Configuration options for using 'local' as your KMS provider
*/
local?: LocalKMSProviderConfiguration;
[key: `local:${string}`]: LocalKMSProviderConfiguration;
/**
* Configuration options for using 'kmip' as your KMS provider
*/
kmip?: KMIPKMSProviderConfiguration;
[key: `kmip:${string}`]: KMIPKMSProviderConfiguration;
/**
* Configuration options for using 'azure' as your KMS provider
*/
azure?: AzureKMSProviderConfiguration | Record<string, never>;
[key: `azure:${string}`]: AzureKMSProviderConfiguration;
/**
* Configuration options for using 'gcp' as your KMS provider
*/
gcp?: GCPKMSProviderConfiguration | Record<string, never>;
[key: `gcp:${string}`]: GCPKMSProviderConfiguration;
}
/**
* Auto credential fetching should only occur when the provider is defined on the kmsProviders map
* and the settings are an empty object.
*
* This is distinct from a nullish provider key.
*
* @internal - exposed for testing purposes only
*/
export function isEmptyCredentials(
providerName: ClientEncryptionDataKeyProvider,
kmsProviders: KMSProviders
): boolean {
const provider = kmsProviders[providerName];
if (provider == null) {
return false;
}
return typeof provider === 'object' && Object.keys(provider).length === 0;
}
/**
* Load cloud provider credentials for the user provided KMS providers.
* Credentials will only attempt to get loaded if they do not exist
* and no existing credentials will get overwritten.
*
* @internal
*/
export async function refreshKMSCredentials(
kmsProviders: KMSProviders,
credentialProviders?: CredentialProviders
): Promise<KMSProviders> {
let finalKMSProviders = kmsProviders;
if (isEmptyCredentials('aws', kmsProviders)) {
finalKMSProviders = await loadAWSCredentials(finalKMSProviders, credentialProviders?.aws);
}
if (isEmptyCredentials('gcp', kmsProviders)) {
finalKMSProviders = await loadGCPCredentials(finalKMSProviders);
}
if (isEmptyCredentials('azure', kmsProviders)) {
finalKMSProviders = await loadAzureCredentials(finalKMSProviders);
}
return finalKMSProviders;
}
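In concrete terms (values illustrative), the distinction `refreshKMSCredentials` relies on:

```ts
isEmptyCredentials('aws', { aws: {} });  // true  -> credentials will be auto-fetched
isEmptyCredentials('aws', { aws: { accessKeyId: '<id>', secretAccessKey: '<secret>' } }); // false -> left as-is
isEmptyCredentials('aws', {});           // false -> provider not configured at all

// So refreshKMSCredentials({ aws: {}, gcp: { accessToken: '<token>' } }) only fills in `aws`
// and never overwrites credentials the caller already supplied.
```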


@@ -0,0 +1,648 @@
import * as fs from 'fs/promises';
import { type MongoCryptContext, type MongoCryptKMSRequest } from 'mongodb-client-encryption';
import * as net from 'net';
import * as tls from 'tls';
import {
type BSONSerializeOptions,
deserialize,
type Document,
pluckBSONSerializeOptions,
serialize
} from '../bson';
import { type ProxyOptions } from '../cmap/connection';
import { CursorTimeoutContext } from '../cursor/abstract_cursor';
import { getSocks, type SocksLib } from '../deps';
import { MongoOperationTimeoutError } from '../error';
import { type MongoClient, type MongoClientOptions } from '../mongo_client';
import { type Abortable } from '../mongo_types';
import { type CollectionInfo } from '../operations/list_collections';
import { Timeout, type TimeoutContext, TimeoutError } from '../timeout';
import {
addAbortListener,
BufferPool,
kDispose,
MongoDBCollectionNamespace,
promiseWithResolvers
} from '../utils';
import { autoSelectSocketOptions, type DataKey } from './client_encryption';
import { MongoCryptError } from './errors';
import { type MongocryptdManager } from './mongocryptd_manager';
import { type KMSProviders } from './providers';
let socks: SocksLib | null = null;
function loadSocks(): SocksLib {
if (socks == null) {
const socksImport = getSocks();
if ('kModuleError' in socksImport) {
throw socksImport.kModuleError;
}
socks = socksImport;
}
return socks;
}
// libmongocrypt states
const MONGOCRYPT_CTX_ERROR = 0;
const MONGOCRYPT_CTX_NEED_MONGO_COLLINFO = 1;
const MONGOCRYPT_CTX_NEED_MONGO_MARKINGS = 2;
const MONGOCRYPT_CTX_NEED_MONGO_KEYS = 3;
const MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS = 7;
const MONGOCRYPT_CTX_NEED_KMS = 4;
const MONGOCRYPT_CTX_READY = 5;
const MONGOCRYPT_CTX_DONE = 6;
const HTTPS_PORT = 443;
const stateToString = new Map([
[MONGOCRYPT_CTX_ERROR, 'MONGOCRYPT_CTX_ERROR'],
[MONGOCRYPT_CTX_NEED_MONGO_COLLINFO, 'MONGOCRYPT_CTX_NEED_MONGO_COLLINFO'],
[MONGOCRYPT_CTX_NEED_MONGO_MARKINGS, 'MONGOCRYPT_CTX_NEED_MONGO_MARKINGS'],
[MONGOCRYPT_CTX_NEED_MONGO_KEYS, 'MONGOCRYPT_CTX_NEED_MONGO_KEYS'],
[MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS, 'MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS'],
[MONGOCRYPT_CTX_NEED_KMS, 'MONGOCRYPT_CTX_NEED_KMS'],
[MONGOCRYPT_CTX_READY, 'MONGOCRYPT_CTX_READY'],
[MONGOCRYPT_CTX_DONE, 'MONGOCRYPT_CTX_DONE']
]);
const INSECURE_TLS_OPTIONS = [
'tlsInsecure',
'tlsAllowInvalidCertificates',
'tlsAllowInvalidHostnames',
// These options are disallowed by the spec, so we explicitly filter them out if provided, even
// though the StateMachine does not declare support for these options.
'tlsDisableOCSPEndpointCheck',
'tlsDisableCertificateRevocationCheck'
];
/**
* Helper function for logging. Enabled by setting the environment flag MONGODB_CRYPT_DEBUG.
* @param msg - Anything you want to be logged.
*/
function debug(msg: unknown) {
if (process.env.MONGODB_CRYPT_DEBUG) {
// eslint-disable-next-line no-console
console.error(msg);
}
}
declare module 'mongodb-client-encryption' {
// the properties added to `MongoCryptContext` here are only used for the `StateMachine`'s
// execute method and are not part of the C++ bindings.
interface MongoCryptContext {
id: number;
document: Document;
ns: string;
}
}
/**
* @public
*
* TLS options to use when connecting. The spec specifically calls out which insecure
* tls options are not allowed:
*
* - tlsAllowInvalidCertificates
* - tlsAllowInvalidHostnames
* - tlsInsecure
*
* These options are not included in the type, and are ignored if provided.
*/
export type ClientEncryptionTlsOptions = Pick<
MongoClientOptions,
'tlsCAFile' | 'tlsCertificateKeyFile' | 'tlsCertificateKeyFilePassword'
>;
/** @public */
export type CSFLEKMSTlsOptions = {
aws?: ClientEncryptionTlsOptions;
gcp?: ClientEncryptionTlsOptions;
kmip?: ClientEncryptionTlsOptions;
local?: ClientEncryptionTlsOptions;
azure?: ClientEncryptionTlsOptions;
[key: string]: ClientEncryptionTlsOptions | undefined;
};
/**
* @public
*
* Socket options to use for KMS requests.
*/
export type ClientEncryptionSocketOptions = Pick<
MongoClientOptions,
'autoSelectFamily' | 'autoSelectFamilyAttemptTimeout'
>;
/**
* This is kind of a hack. For `rewrapManyDataKey`, we have tests that
* guarantee that when there are no matching keys, `rewrapManyDataKey` returns
* nothing. We also have tests for auto encryption that guarantee for `encrypt`
* we return an error when there are no matching keys. This error is generated in
* subsequent iterations of the state machine.
* Some apis (`encrypt`) throw if there are no filter matches and others (`rewrapManyDataKey`)
* do not. We set the result manually here, and let the state machine continue. `libmongocrypt`
* will inform us if we need to error by setting the state to `MONGOCRYPT_CTX_ERROR` but
* otherwise we'll return `{ v: [] }`.
*/
let EMPTY_V: Uint8Array | undefined;
/**
* @internal
*
* An interface representing an object that can be passed to the `StateMachine.execute` method.
*
* Not all properties are required for all operations.
*/
export interface StateMachineExecutable {
_keyVaultNamespace: string;
_keyVaultClient: MongoClient;
askForKMSCredentials: () => Promise<KMSProviders>;
/** only used for auto encryption */
_metaDataClient?: MongoClient;
/** only used for auto encryption */
_mongocryptdClient?: MongoClient;
/** only used for auto encryption */
_mongocryptdManager?: MongocryptdManager;
}
export type StateMachineOptions = {
/** socks5 proxy options, if set. */
proxyOptions: ProxyOptions;
/** TLS options for KMS requests, if set. */
tlsOptions: CSFLEKMSTlsOptions;
/** Socket specific options we support. */
socketOptions: ClientEncryptionSocketOptions;
} & Pick<BSONSerializeOptions, 'promoteLongs' | 'promoteValues'>;
/**
* @internal
* An internal class that executes across a MongoCryptContext until either
* a finishing state or an error is reached. Do not instantiate directly.
*/
// TODO(DRIVERS-2671): clarify CSOT behavior for FLE APIs
export class StateMachine {
constructor(
private options: StateMachineOptions,
private bsonOptions = pluckBSONSerializeOptions(options)
) {}
/**
* Executes the state machine according to the specification
*/
async execute(
executor: StateMachineExecutable,
context: MongoCryptContext,
options: { timeoutContext?: TimeoutContext } & Abortable
): Promise<Uint8Array> {
const keyVaultNamespace = executor._keyVaultNamespace;
const keyVaultClient = executor._keyVaultClient;
const metaDataClient = executor._metaDataClient;
const mongocryptdClient = executor._mongocryptdClient;
const mongocryptdManager = executor._mongocryptdManager;
let result: Uint8Array | null = null;
    // TypeScript treats getters just like properties: once a getter has been compared for
    // equality, the compiler assumes its value cannot change. That is the opposite of how we
    // use state and status here: every call to at least `addMongoOperationResponse` and
    // `finalize` can change the state. These wrappers let us write the checks naturally
    // without adding compiler workarounds to the condition checks inside the state machine.
const getStatus = () => context.status;
const getState = () => context.state;
while (getState() !== MONGOCRYPT_CTX_DONE && getState() !== MONGOCRYPT_CTX_ERROR) {
options.signal?.throwIfAborted();
debug(`[context#${context.id}] ${stateToString.get(getState()) || getState()}`);
switch (getState()) {
case MONGOCRYPT_CTX_NEED_MONGO_COLLINFO: {
const filter = deserialize(context.nextMongoOperation());
if (!metaDataClient) {
throw new MongoCryptError(
'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_COLLINFO but metadata client is undefined'
);
}
const collInfoCursor = this.fetchCollectionInfo(
metaDataClient,
context.ns,
filter,
options
);
for await (const collInfo of collInfoCursor) {
context.addMongoOperationResponse(serialize(collInfo));
if (getState() === MONGOCRYPT_CTX_ERROR) break;
}
if (getState() === MONGOCRYPT_CTX_ERROR) break;
context.finishMongoOperation();
break;
}
case MONGOCRYPT_CTX_NEED_MONGO_MARKINGS: {
const command = context.nextMongoOperation();
if (getState() === MONGOCRYPT_CTX_ERROR) break;
if (!mongocryptdClient) {
throw new MongoCryptError(
'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_MARKINGS but mongocryptdClient is undefined'
);
}
// When we are using the shared library, we don't have a mongocryptd manager.
const markedCommand: Uint8Array = mongocryptdManager
? await mongocryptdManager.withRespawn(
this.markCommand.bind(this, mongocryptdClient, context.ns, command, options)
)
: await this.markCommand(mongocryptdClient, context.ns, command, options);
context.addMongoOperationResponse(markedCommand);
context.finishMongoOperation();
break;
}
case MONGOCRYPT_CTX_NEED_MONGO_KEYS: {
const filter = context.nextMongoOperation();
const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter, options);
if (keys.length === 0) {
// See docs on EMPTY_V
result = EMPTY_V ??= serialize({ v: [] });
}
for await (const key of keys) {
context.addMongoOperationResponse(serialize(key));
}
context.finishMongoOperation();
break;
}
case MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS: {
const kmsProviders = await executor.askForKMSCredentials();
context.provideKMSProviders(serialize(kmsProviders));
break;
}
case MONGOCRYPT_CTX_NEED_KMS: {
await Promise.all(this.requests(context, options));
context.finishKMSRequests();
break;
}
case MONGOCRYPT_CTX_READY: {
const finalizedContext = context.finalize();
if (getState() === MONGOCRYPT_CTX_ERROR) {
const message = getStatus().message || 'Finalization error';
throw new MongoCryptError(message);
}
result = finalizedContext;
break;
}
default:
throw new MongoCryptError(`Unknown state: ${getState()}`);
}
}
if (getState() === MONGOCRYPT_CTX_ERROR || result == null) {
const message = getStatus().message;
if (!message) {
debug(
`unidentifiable error in MongoCrypt - received an error status from \`libmongocrypt\` but received no error message.`
);
}
throw new MongoCryptError(
message ??
'unidentifiable error in MongoCrypt - received an error status from `libmongocrypt` but received no error message.'
);
}
return result;
}
/**
* Handles the request to the KMS service. Exposed for testing purposes. Do not directly invoke.
   * @param request - A C++ KMS request returned from the bindings
   * @returns A promise that resolves when the KMS reply has been fully parsed
*/
async kmsRequest(
request: MongoCryptKMSRequest,
options?: { timeoutContext?: TimeoutContext } & Abortable
): Promise<void> {
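    // request.endpoint is a KMS host with an optional port (for example 'my.kms.host:8200' as an
    // illustrative value); when no port is given we fall back to the standard HTTPS port 443.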
const parsedUrl = request.endpoint.split(':');
const port = parsedUrl[1] != null ? Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT;
const socketOptions: tls.ConnectionOptions & {
host: string;
port: number;
autoSelectFamily?: boolean;
autoSelectFamilyAttemptTimeout?: number;
} = {
host: parsedUrl[0],
servername: parsedUrl[0],
port,
...autoSelectSocketOptions(this.options.socketOptions || {})
};
const message = request.message;
const buffer = new BufferPool();
let netSocket: net.Socket;
let socket: tls.TLSSocket;
function destroySockets() {
for (const sock of [socket, netSocket]) {
if (sock) {
sock.destroy();
}
}
}
function onerror(cause: Error) {
return new MongoCryptError('KMS request failed', { cause });
}
function onclose() {
return new MongoCryptError('KMS request closed');
}
const tlsOptions = this.options.tlsOptions;
if (tlsOptions) {
const kmsProvider = request.kmsProvider;
const providerTlsOptions = tlsOptions[kmsProvider];
if (providerTlsOptions) {
const error = this.validateTlsOptions(kmsProvider, providerTlsOptions);
if (error) {
throw error;
}
try {
await this.setTlsOptions(providerTlsOptions, socketOptions);
} catch (err) {
throw onerror(err);
}
}
}
let abortListener;
try {
if (this.options.proxyOptions && this.options.proxyOptions.proxyHost) {
netSocket = new net.Socket();
const {
promise: willConnect,
reject: rejectOnNetSocketError,
resolve: resolveOnNetSocketConnect
} = promiseWithResolvers<void>();
netSocket
.once('error', err => rejectOnNetSocketError(onerror(err)))
.once('close', () => rejectOnNetSocketError(onclose()))
.once('connect', () => resolveOnNetSocketConnect());
const netSocketOptions = {
...socketOptions,
host: this.options.proxyOptions.proxyHost,
port: this.options.proxyOptions.proxyPort || 1080
};
netSocket.connect(netSocketOptions);
await willConnect;
try {
socks ??= loadSocks();
socketOptions.socket = (
await socks.SocksClient.createConnection({
existing_socket: netSocket,
command: 'connect',
destination: { host: socketOptions.host, port: socketOptions.port },
proxy: {
// host and port are ignored because we pass existing_socket
host: 'iLoveJavaScript',
port: 0,
type: 5,
userId: this.options.proxyOptions.proxyUsername,
password: this.options.proxyOptions.proxyPassword
}
})
).socket;
} catch (err) {
throw onerror(err);
}
}
socket = tls.connect(socketOptions, () => {
socket.write(message);
});
const {
promise: willResolveKmsRequest,
reject: rejectOnTlsSocketError,
resolve
} = promiseWithResolvers<void>();
abortListener = addAbortListener(options?.signal, function () {
destroySockets();
rejectOnTlsSocketError(this.reason);
});
socket
.once('error', err => rejectOnTlsSocketError(onerror(err)))
.once('close', () => rejectOnTlsSocketError(onclose()))
.on('data', data => {
buffer.append(data);
while (request.bytesNeeded > 0 && buffer.length) {
const bytesNeeded = Math.min(request.bytesNeeded, buffer.length);
request.addResponse(buffer.read(bytesNeeded));
}
if (request.bytesNeeded <= 0) {
resolve();
}
});
await (options?.timeoutContext?.csotEnabled()
? Promise.all([
willResolveKmsRequest,
Timeout.expires(options.timeoutContext?.remainingTimeMS)
])
: willResolveKmsRequest);
} catch (error) {
if (error instanceof TimeoutError)
throw new MongoOperationTimeoutError('KMS request timed out');
throw error;
} finally {
// There's no need for any more activity on this socket at this point.
destroySockets();
abortListener?.[kDispose]();
}
}
*requests(context: MongoCryptContext, options?: { timeoutContext?: TimeoutContext } & Abortable) {
for (
let request = context.nextKMSRequest();
request != null;
request = context.nextKMSRequest()
) {
yield this.kmsRequest(request, options);
}
}
/**
* Validates the provided TLS options are secure.
*
* @param kmsProvider - The KMS provider name.
* @param tlsOptions - The client TLS options for the provider.
*
* @returns An error if any option is invalid.
*/
validateTlsOptions(
kmsProvider: string,
tlsOptions: ClientEncryptionTlsOptions
): MongoCryptError | void {
const tlsOptionNames = Object.keys(tlsOptions);
for (const option of INSECURE_TLS_OPTIONS) {
if (tlsOptionNames.includes(option)) {
return new MongoCryptError(`Insecure TLS options prohibited for ${kmsProvider}: ${option}`);
}
}
}
/**
* Sets only the valid secure TLS options.
*
* @param tlsOptions - The client TLS options for the provider.
* @param options - The existing connection options.
*/
async setTlsOptions(
tlsOptions: ClientEncryptionTlsOptions,
options: tls.ConnectionOptions
): Promise<void> {
if (tlsOptions.tlsCertificateKeyFile) {
const cert = await fs.readFile(tlsOptions.tlsCertificateKeyFile);
options.cert = options.key = cert;
}
if (tlsOptions.tlsCAFile) {
options.ca = await fs.readFile(tlsOptions.tlsCAFile);
}
if (tlsOptions.tlsCertificateKeyFilePassword) {
options.passphrase = tlsOptions.tlsCertificateKeyFilePassword;
}
}
/**
* Fetches collection info for a provided namespace, when libmongocrypt
* enters the `MONGOCRYPT_CTX_NEED_MONGO_COLLINFO` state. The result is
* used to inform libmongocrypt of the schema associated with this
* namespace. Exposed for testing purposes. Do not directly invoke.
*
* @param client - A MongoClient connected to the topology
* @param ns - The namespace to list collections from
* @param filter - A filter for the listCollections command
   * @returns An async iterable over the matching collection info documents
*/
fetchCollectionInfo(
client: MongoClient,
ns: string,
filter: Document,
options?: { timeoutContext?: TimeoutContext } & Abortable
): AsyncIterable<CollectionInfo> {
const { db } = MongoDBCollectionNamespace.fromString(ns);
const cursor = client.db(db).listCollections(filter, {
promoteLongs: false,
promoteValues: false,
timeoutContext:
options?.timeoutContext && new CursorTimeoutContext(options?.timeoutContext, Symbol()),
signal: options?.signal,
nameOnly: false
});
return cursor;
}
/**
* Calls to the mongocryptd to provide markings for a command.
* Exposed for testing purposes. Do not directly invoke.
* @param client - A MongoClient connected to a mongocryptd
* @param ns - The namespace (database.collection) the command is being executed on
* @param command - The command to execute.
   * @returns The serialized and marked BSON command
*/
async markCommand(
client: MongoClient,
ns: string,
command: Uint8Array,
options?: { timeoutContext?: TimeoutContext } & Abortable
): Promise<Uint8Array> {
const { db } = MongoDBCollectionNamespace.fromString(ns);
const bsonOptions = { promoteLongs: false, promoteValues: false };
const rawCommand = deserialize(command, bsonOptions);
const commandOptions: {
timeoutMS?: number;
signal?: AbortSignal;
} = {
timeoutMS: undefined,
signal: undefined
};
if (options?.timeoutContext?.csotEnabled()) {
commandOptions.timeoutMS = options.timeoutContext.remainingTimeMS;
}
if (options?.signal) {
commandOptions.signal = options.signal;
}
const response = await client.db(db).command(rawCommand, {
...bsonOptions,
...commandOptions
});
return serialize(response, this.bsonOptions);
}
/**
* Requests keys from the keyVault collection on the topology.
* Exposed for testing purposes. Do not directly invoke.
* @param client - A MongoClient connected to the topology
* @param keyVaultNamespace - The namespace (database.collection) of the keyVault Collection
* @param filter - The filter for the find query against the keyVault Collection
   * @returns The data keys found in the key vault collection
*/
fetchKeys(
client: MongoClient,
keyVaultNamespace: string,
filter: Uint8Array,
options?: { timeoutContext?: TimeoutContext } & Abortable
): Promise<Array<DataKey>> {
const { db: dbName, collection: collectionName } =
MongoDBCollectionNamespace.fromString(keyVaultNamespace);
const commandOptions: {
timeoutContext?: CursorTimeoutContext;
signal?: AbortSignal;
} = {
timeoutContext: undefined,
signal: undefined
};
if (options?.timeoutContext != null) {
commandOptions.timeoutContext = new CursorTimeoutContext(options.timeoutContext, Symbol());
}
if (options?.signal != null) {
commandOptions.signal = options.signal;
}
return client
.db(dbName)
.collection<DataKey>(collectionName, { readConcern: { level: 'majority' } })
.find(deserialize(filter), commandOptions)
.toArray();
}
}

View file

@@ -0,0 +1,77 @@
import type { Document } from '../../bson';
import { MongoRuntimeError } from '../../error';
import type { HandshakeDocument } from '../connect';
import type { Connection, ConnectionOptions } from '../connection';
import type { MongoCredentials } from './mongo_credentials';
/**
* Context used during authentication
* @internal
*/
export class AuthContext {
/** The connection to authenticate */
connection: Connection;
/** The credentials to use for authentication */
credentials?: MongoCredentials;
/** If the context is for reauthentication. */
reauthenticating = false;
/** The options passed to the `connect` method */
options: ConnectionOptions;
  /** A response from an initial auth attempt; only some mechanisms use this (e.g., SCRAM) */
response?: Document;
/** A random nonce generated for use in an authentication conversation */
nonce?: Buffer;
constructor(
connection: Connection,
credentials: MongoCredentials | undefined,
options: ConnectionOptions
) {
this.connection = connection;
this.credentials = credentials;
this.options = options;
}
}
/**
* Provider used during authentication.
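 *
 * A minimal, illustrative subclass only needs to implement `auth`:
 * ```ts
 * class NoopAuthProvider extends AuthProvider {
 *   override async auth(_context: AuthContext): Promise<void> {
 *     // perform the mechanism-specific conversation with the server here
 *   }
 * }
 * ```
 *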
* @internal
*/
export abstract class AuthProvider {
/**
* Prepare the handshake document before the initial handshake.
*
* @param handshakeDoc - The document used for the initial handshake on a connection
* @param authContext - Context for authentication flow
*/
async prepare(
handshakeDoc: HandshakeDocument,
_authContext: AuthContext
): Promise<HandshakeDocument> {
return handshakeDoc;
}
/**
* Authenticate
*
* @param context - A shared context for authentication flow
*/
abstract auth(context: AuthContext): Promise<void>;
/**
* Reauthenticate.
* @param context - The shared auth context.
*/
async reauth(context: AuthContext): Promise<void> {
if (context.reauthenticating) {
throw new MongoRuntimeError('Reauthentication already in progress.');
}
try {
context.reauthenticating = true;
await this.auth(context);
} finally {
context.reauthenticating = false;
}
}
}

View file

@@ -0,0 +1,185 @@
import { type AWSCredentials, getAwsCredentialProvider } from '../../deps';
import { MongoAWSError } from '../../error';
import { request } from '../../utils';
const AWS_RELATIVE_URI = 'http://169.254.170.2';
const AWS_EC2_URI = 'http://169.254.169.254';
const AWS_EC2_PATH = '/latest/meta-data/iam/security-credentials';
/**
* @internal
* This interface matches the final result of fetching temporary credentials manually, outlined
* in the spec [here](https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#ec2-endpoint).
*
* When we use the AWS SDK, we map the response from the SDK to conform to this interface.
*/
export interface AWSTempCredentials {
AccessKeyId?: string;
SecretAccessKey?: string;
Token?: string;
RoleArn?: string;
Expiration?: Date;
}
/** @public */
export type AWSCredentialProvider = () => Promise<AWSCredentials>;
/**
* @internal
*
* Fetches temporary AWS credentials.
*/
export abstract class AWSTemporaryCredentialProvider {
abstract getCredentials(): Promise<AWSTempCredentials>;
private static _awsSDK: ReturnType<typeof getAwsCredentialProvider>;
protected static get awsSDK() {
AWSTemporaryCredentialProvider._awsSDK ??= getAwsCredentialProvider();
return AWSTemporaryCredentialProvider._awsSDK;
}
static get isAWSSDKInstalled(): boolean {
return !('kModuleError' in AWSTemporaryCredentialProvider.awsSDK);
}
}
/** @internal */
export class AWSSDKCredentialProvider extends AWSTemporaryCredentialProvider {
private _provider?: AWSCredentialProvider;
/**
* Create the SDK credentials provider.
* @param credentialsProvider - The credentials provider.
*/
constructor(credentialsProvider?: AWSCredentialProvider) {
super();
if (credentialsProvider) {
this._provider = credentialsProvider;
}
}
/**
* The AWS SDK caches credentials automatically and handles refresh when the credentials have expired.
   * To ensure this occurs, we need to cache the `provider` returned by the AWS SDK and re-use it when fetching credentials.
*/
private get provider(): () => Promise<AWSCredentials> {
if ('kModuleError' in AWSTemporaryCredentialProvider.awsSDK) {
throw AWSTemporaryCredentialProvider.awsSDK.kModuleError;
}
if (this._provider) {
return this._provider;
}
let { AWS_STS_REGIONAL_ENDPOINTS = '', AWS_REGION = '' } = process.env;
AWS_STS_REGIONAL_ENDPOINTS = AWS_STS_REGIONAL_ENDPOINTS.toLowerCase();
AWS_REGION = AWS_REGION.toLowerCase();
    /** The option setting should work only for users who have explicit settings in their environment; the driver should not encode "defaults". */
const awsRegionSettingsExist =
AWS_REGION.length !== 0 && AWS_STS_REGIONAL_ENDPOINTS.length !== 0;
/**
* The following regions use the global AWS STS endpoint, sts.amazonaws.com, by default
* https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html
*/
const LEGACY_REGIONS = new Set([
'ap-northeast-1',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'aws-global',
'ca-central-1',
'eu-central-1',
'eu-north-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'sa-east-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2'
]);
/**
* If AWS_STS_REGIONAL_ENDPOINTS is set to regional, users are opting into the new behavior of respecting the region settings
*
* If AWS_STS_REGIONAL_ENDPOINTS is set to legacy, then "old" regions need to keep using the global setting.
     * Technically the SDK gets this wrong: it reaches out to 'sts.us-east-1.amazonaws.com' when it should use 'sts.amazonaws.com'.
* That is not our bug to fix here. We leave that up to the SDK.
*/
const useRegionalSts =
AWS_STS_REGIONAL_ENDPOINTS === 'regional' ||
(AWS_STS_REGIONAL_ENDPOINTS === 'legacy' && !LEGACY_REGIONS.has(AWS_REGION));
this._provider =
awsRegionSettingsExist && useRegionalSts
? AWSTemporaryCredentialProvider.awsSDK.fromNodeProviderChain({
clientConfig: { region: AWS_REGION }
})
: AWSTemporaryCredentialProvider.awsSDK.fromNodeProviderChain();
return this._provider;
}
override async getCredentials(): Promise<AWSTempCredentials> {
/*
* Creates a credential provider that will attempt to find credentials from the
* following sources (listed in order of precedence):
*
* - Environment variables exposed via process.env
* - SSO credentials from token cache
* - Web identity token credentials
* - Shared credentials and config ini files
* - The EC2/ECS Instance Metadata Service
*/
try {
const creds = await this.provider();
return {
AccessKeyId: creds.accessKeyId,
SecretAccessKey: creds.secretAccessKey,
Token: creds.sessionToken,
Expiration: creds.expiration
};
} catch (error) {
throw new MongoAWSError(error.message, { cause: error });
}
}
}
/**
* @internal
* Fetches credentials manually (without the AWS SDK), as outlined in the [Obtaining Credentials](https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#obtaining-credentials)
* section of the Auth spec.
*/
export class LegacyAWSTemporaryCredentialProvider extends AWSTemporaryCredentialProvider {
override async getCredentials(): Promise<AWSTempCredentials> {
// If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
// is set then drivers MUST assume that it was set by an AWS ECS agent
if (process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI) {
return await request(
`${AWS_RELATIVE_URI}${process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI}`
);
}
// Otherwise assume we are on an EC2 instance
// get a token
const token = await request(`${AWS_EC2_URI}/latest/api/token`, {
method: 'PUT',
json: false,
headers: { 'X-aws-ec2-metadata-token-ttl-seconds': 30 }
});
// get role name
const roleName = await request(`${AWS_EC2_URI}/${AWS_EC2_PATH}`, {
json: false,
headers: { 'X-aws-ec2-metadata-token': token }
});
// get temp credentials
const creds = await request(`${AWS_EC2_URI}/${AWS_EC2_PATH}/${roleName}`, {
headers: { 'X-aws-ec2-metadata-token': token }
});
return creds;
}
}

View file

@@ -0,0 +1,202 @@
import * as dns from 'dns';
import { getKerberos, type Kerberos, type KerberosClient } from '../../deps';
import { MongoInvalidArgumentError, MongoMissingCredentialsError } from '../../error';
import { ns } from '../../utils';
import type { Connection } from '../connection';
import { type AuthContext, AuthProvider } from './auth_provider';
/** @public */
export const GSSAPICanonicalizationValue = Object.freeze({
on: true,
off: false,
none: 'none',
forward: 'forward',
forwardAndReverse: 'forwardAndReverse'
} as const);
/** @public */
export type GSSAPICanonicalizationValue =
(typeof GSSAPICanonicalizationValue)[keyof typeof GSSAPICanonicalizationValue];
type MechanismProperties = {
CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
SERVICE_HOST?: string;
SERVICE_NAME?: string;
SERVICE_REALM?: string;
};
async function externalCommand(
connection: Connection,
command: ReturnType<typeof saslStart> | ReturnType<typeof saslContinue>
): Promise<{ payload: string; conversationId: number }> {
const response = await connection.command(ns('$external.$cmd'), command);
return response as { payload: string; conversationId: number };
}
let krb: Kerberos;
export class GSSAPI extends AuthProvider {
override async auth(authContext: AuthContext): Promise<void> {
const { connection, credentials } = authContext;
if (credentials == null) {
throw new MongoMissingCredentialsError('Credentials required for GSSAPI authentication');
}
const { username } = credentials;
const client = await makeKerberosClient(authContext);
const payload = await client.step('');
const saslStartResponse = await externalCommand(connection, saslStart(payload));
const negotiatedPayload = await negotiate(client, 10, saslStartResponse.payload);
const saslContinueResponse = await externalCommand(
connection,
saslContinue(negotiatedPayload, saslStartResponse.conversationId)
);
const finalizePayload = await finalize(client, username, saslContinueResponse.payload);
await externalCommand(connection, {
saslContinue: 1,
conversationId: saslContinueResponse.conversationId,
payload: finalizePayload
});
}
}
async function makeKerberosClient(authContext: AuthContext): Promise<KerberosClient> {
const { hostAddress } = authContext.options;
const { credentials } = authContext;
if (!hostAddress || typeof hostAddress.host !== 'string' || !credentials) {
throw new MongoInvalidArgumentError(
'Connection must have host and port and credentials defined.'
);
}
loadKrb();
if ('kModuleError' in krb) {
throw krb['kModuleError'];
}
const { initializeClient } = krb;
const { username, password } = credentials;
const mechanismProperties = credentials.mechanismProperties as MechanismProperties;
const serviceName = mechanismProperties.SERVICE_NAME ?? 'mongodb';
const host = await performGSSAPICanonicalizeHostName(hostAddress.host, mechanismProperties);
const initOptions = {};
if (password != null) {
// TODO(NODE-5139): These do not match the typescript options in initializeClient
Object.assign(initOptions, { user: username, password: password });
}
const spnHost = mechanismProperties.SERVICE_HOST ?? host;
let spn = `${serviceName}${process.platform === 'win32' ? '/' : '@'}${spnHost}`;
if ('SERVICE_REALM' in mechanismProperties) {
spn = `${spn}@${mechanismProperties.SERVICE_REALM}`;
}
return await initializeClient(spn, initOptions);
}
function saslStart(payload: string) {
return {
saslStart: 1,
mechanism: 'GSSAPI',
payload,
autoAuthorize: 1
} as const;
}
function saslContinue(payload: string, conversationId: number) {
return {
saslContinue: 1,
conversationId,
payload
} as const;
}
async function negotiate(
client: KerberosClient,
retries: number,
payload: string
): Promise<string> {
try {
const response = await client.step(payload);
return response || '';
} catch (error) {
if (retries === 0) {
// Retries exhausted, raise error
throw error;
}
// Adjust number of retries and call step again
return await negotiate(client, retries - 1, payload);
}
}
async function finalize(client: KerberosClient, user: string, payload: string): Promise<string> {
// GSS Client Unwrap
const response = await client.unwrap(payload);
return await client.wrap(response || '', { user });
}
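/**
 * Canonicalizes the host name according to the CANONICALIZE_HOST_NAME mechanism property.
 * As a rough guide: 'none' (or an unset mode) returns the host unchanged, 'forward' resolves
 * only the CNAME, and 'forwardAndReverse' (or `true`) resolves the host's IP address and
 * attempts a reverse PTR lookup, falling back to the CNAME lookup on failure.
 */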
export async function performGSSAPICanonicalizeHostName(
host: string,
mechanismProperties: MechanismProperties
): Promise<string> {
const mode = mechanismProperties.CANONICALIZE_HOST_NAME;
if (!mode || mode === GSSAPICanonicalizationValue.none) {
return host;
}
  // If the mode is forwardAndReverse or true
if (
mode === GSSAPICanonicalizationValue.on ||
mode === GSSAPICanonicalizationValue.forwardAndReverse
) {
// Perform the lookup of the ip address.
const { address } = await dns.promises.lookup(host);
try {
// Perform a reverse ptr lookup on the ip address.
const results = await dns.promises.resolvePtr(address);
// If the ptr did not error but had no results, return the host.
return results.length > 0 ? results[0] : host;
} catch {
      // This can error as PTR records may not exist for all IPs. In this case
      // fall back to a CNAME lookup as dns.lookup() does not return the
      // CNAME.
return await resolveCname(host);
}
} else {
// The case for forward is just to resolve the cname as dns.lookup()
// will not return it.
return await resolveCname(host);
}
}
export async function resolveCname(host: string): Promise<string> {
// Attempt to resolve the host name
try {
const results = await dns.promises.resolveCname(host);
// Get the first resolved host id
return results.length > 0 ? results[0] : host;
} catch {
return host;
}
}
/**
* Load the Kerberos library.
*/
function loadKrb() {
if (!krb) {
krb = getKerberos();
}
}

View file

@@ -0,0 +1,310 @@
// Resolves the default auth mechanism according to
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.md
import type { Document } from '../../bson';
import {
MongoAPIError,
MongoInvalidArgumentError,
MongoMissingCredentialsError
} from '../../error';
import type { AWSCredentialProvider } from './aws_temporary_credentials';
import { GSSAPICanonicalizationValue } from './gssapi';
import type { OIDCCallbackFunction } from './mongodb_oidc';
import { AUTH_MECHS_AUTH_SRC_EXTERNAL, AuthMechanism } from './providers';
/**
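 * Resolves the default auth mechanism from a hello response. For example, a hello containing
 * `saslSupportedMechs: ['SCRAM-SHA-1', 'SCRAM-SHA-256']` resolves to SCRAM-SHA-256, a hello
 * containing only `['SCRAM-SHA-1']` resolves to SCRAM-SHA-1, and a missing list falls back to
 * SCRAM-SHA-256.
 *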
* @see https://github.com/mongodb/specifications/blob/master/source/auth/auth.md
*/
function getDefaultAuthMechanism(hello: Document | null): AuthMechanism {
if (hello) {
// If hello contains saslSupportedMechs, use scram-sha-256
// if it is available, else scram-sha-1
if (Array.isArray(hello.saslSupportedMechs)) {
return hello.saslSupportedMechs.includes(AuthMechanism.MONGODB_SCRAM_SHA256)
? AuthMechanism.MONGODB_SCRAM_SHA256
: AuthMechanism.MONGODB_SCRAM_SHA1;
}
}
// Default auth mechanism for 4.0 and higher.
return AuthMechanism.MONGODB_SCRAM_SHA256;
}
const ALLOWED_ENVIRONMENT_NAMES: AuthMechanismProperties['ENVIRONMENT'][] = [
'test',
'azure',
'gcp',
'k8s'
];
const ALLOWED_HOSTS_ERROR = 'Auth mechanism property ALLOWED_HOSTS must be an array of strings.';
/** @internal */
export const DEFAULT_ALLOWED_HOSTS = [
'*.mongodb.net',
'*.mongodb-qa.net',
'*.mongodb-dev.net',
'*.mongodbgov.net',
'localhost',
'127.0.0.1',
'::1'
];
/** Error for when the token audience is missing in the environment. */
const TOKEN_RESOURCE_MISSING_ERROR =
'TOKEN_RESOURCE must be set in the auth mechanism properties when ENVIRONMENT is azure or gcp.';
/** @public */
export interface AuthMechanismProperties extends Document {
SERVICE_HOST?: string;
SERVICE_NAME?: string;
SERVICE_REALM?: string;
CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
AWS_SESSION_TOKEN?: string;
/** A user provided OIDC machine callback function. */
OIDC_CALLBACK?: OIDCCallbackFunction;
/** A user provided OIDC human interacted callback function. */
OIDC_HUMAN_CALLBACK?: OIDCCallbackFunction;
/** The OIDC environment. Note that 'test' is for internal use only. */
ENVIRONMENT?: 'test' | 'azure' | 'gcp' | 'k8s';
/** Allowed hosts that OIDC auth can connect to. */
ALLOWED_HOSTS?: string[];
/** The resource token for OIDC auth in Azure and GCP. */
TOKEN_RESOURCE?: string;
/**
* A custom AWS credential provider to use. An example using the AWS SDK default provider chain:
*
* ```ts
* const client = new MongoClient(process.env.MONGODB_URI, {
* authMechanismProperties: {
* AWS_CREDENTIAL_PROVIDER: fromNodeProviderChain()
* }
* });
* ```
*
* Using a custom function that returns AWS credentials:
*
* ```ts
* const client = new MongoClient(process.env.MONGODB_URI, {
* authMechanismProperties: {
* AWS_CREDENTIAL_PROVIDER: async () => {
* return {
* accessKeyId: process.env.ACCESS_KEY_ID,
* secretAccessKey: process.env.SECRET_ACCESS_KEY
* }
* }
* }
* });
* ```
*/
AWS_CREDENTIAL_PROVIDER?: AWSCredentialProvider;
}
/** @public */
export interface MongoCredentialsOptions {
username?: string;
password: string;
source: string;
db?: string;
mechanism?: AuthMechanism;
mechanismProperties: AuthMechanismProperties;
}
/**
* A representation of the credentials used by MongoDB
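 *
 * An illustrative construction (all values are placeholders):
 * ```ts
 * const credentials = new MongoCredentials({
 *   username: 'alice',
 *   password: 'secret',
 *   source: 'admin',
 *   mechanism: AuthMechanism.MONGODB_SCRAM_SHA256,
 *   mechanismProperties: {}
 * });
 * ```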
* @public
*/
export class MongoCredentials {
/** The username used for authentication */
readonly username: string;
/** The password used for authentication */
readonly password: string;
/** The database that the user should authenticate against */
readonly source: string;
/** The method used to authenticate */
readonly mechanism: AuthMechanism;
/** Special properties used by some types of auth mechanisms */
readonly mechanismProperties: AuthMechanismProperties;
constructor(options: MongoCredentialsOptions) {
this.username = options.username ?? '';
this.password = options.password;
this.source = options.source;
if (!this.source && options.db) {
this.source = options.db;
}
this.mechanism = options.mechanism || AuthMechanism.MONGODB_DEFAULT;
this.mechanismProperties = options.mechanismProperties || {};
if (this.mechanism.match(/MONGODB-AWS/i)) {
if (!this.username && process.env.AWS_ACCESS_KEY_ID) {
this.username = process.env.AWS_ACCESS_KEY_ID;
}
if (!this.password && process.env.AWS_SECRET_ACCESS_KEY) {
this.password = process.env.AWS_SECRET_ACCESS_KEY;
}
if (
this.mechanismProperties.AWS_SESSION_TOKEN == null &&
process.env.AWS_SESSION_TOKEN != null
) {
this.mechanismProperties = {
...this.mechanismProperties,
AWS_SESSION_TOKEN: process.env.AWS_SESSION_TOKEN
};
}
}
if (this.mechanism === AuthMechanism.MONGODB_OIDC && !this.mechanismProperties.ALLOWED_HOSTS) {
this.mechanismProperties = {
...this.mechanismProperties,
ALLOWED_HOSTS: DEFAULT_ALLOWED_HOSTS
};
}
Object.freeze(this.mechanismProperties);
Object.freeze(this);
}
/** Determines if two MongoCredentials objects are equivalent */
equals(other: MongoCredentials): boolean {
return (
this.mechanism === other.mechanism &&
this.username === other.username &&
this.password === other.password &&
this.source === other.source
);
}
/**
* If the authentication mechanism is set to "default", resolves the authMechanism
* based on the server version and server supported sasl mechanisms.
*
* @param hello - A hello response from the server
*/
resolveAuthMechanism(hello: Document | null): MongoCredentials {
    // Only a "default" mechanism needs to be resolved; any explicitly requested mechanism is returned as-is
if (this.mechanism.match(/DEFAULT/i)) {
return new MongoCredentials({
username: this.username,
password: this.password,
source: this.source,
mechanism: getDefaultAuthMechanism(hello),
mechanismProperties: this.mechanismProperties
});
}
return this;
}
validate(): void {
if (
(this.mechanism === AuthMechanism.MONGODB_GSSAPI ||
this.mechanism === AuthMechanism.MONGODB_PLAIN ||
this.mechanism === AuthMechanism.MONGODB_SCRAM_SHA1 ||
this.mechanism === AuthMechanism.MONGODB_SCRAM_SHA256) &&
!this.username
) {
throw new MongoMissingCredentialsError(`Username required for mechanism '${this.mechanism}'`);
}
if (this.mechanism === AuthMechanism.MONGODB_OIDC) {
if (
this.username &&
this.mechanismProperties.ENVIRONMENT &&
this.mechanismProperties.ENVIRONMENT !== 'azure'
) {
throw new MongoInvalidArgumentError(
`username and ENVIRONMENT '${this.mechanismProperties.ENVIRONMENT}' may not be used together for mechanism '${this.mechanism}'.`
);
}
if (this.username && this.password) {
throw new MongoInvalidArgumentError(
`No password is allowed in ENVIRONMENT '${this.mechanismProperties.ENVIRONMENT}' for '${this.mechanism}'.`
);
}
if (
(this.mechanismProperties.ENVIRONMENT === 'azure' ||
this.mechanismProperties.ENVIRONMENT === 'gcp') &&
!this.mechanismProperties.TOKEN_RESOURCE
) {
throw new MongoInvalidArgumentError(TOKEN_RESOURCE_MISSING_ERROR);
}
if (
this.mechanismProperties.ENVIRONMENT &&
!ALLOWED_ENVIRONMENT_NAMES.includes(this.mechanismProperties.ENVIRONMENT)
) {
throw new MongoInvalidArgumentError(
          `Currently only an ENVIRONMENT in ${ALLOWED_ENVIRONMENT_NAMES.join(
','
)} is supported for mechanism '${this.mechanism}'.`
);
}
if (
!this.mechanismProperties.ENVIRONMENT &&
!this.mechanismProperties.OIDC_CALLBACK &&
!this.mechanismProperties.OIDC_HUMAN_CALLBACK
) {
throw new MongoInvalidArgumentError(
          `Either an ENVIRONMENT, OIDC_CALLBACK, or OIDC_HUMAN_CALLBACK must be specified for mechanism '${this.mechanism}'.`
);
}
if (this.mechanismProperties.ALLOWED_HOSTS) {
const hosts = this.mechanismProperties.ALLOWED_HOSTS;
if (!Array.isArray(hosts)) {
throw new MongoInvalidArgumentError(ALLOWED_HOSTS_ERROR);
}
for (const host of hosts) {
if (typeof host !== 'string') {
throw new MongoInvalidArgumentError(ALLOWED_HOSTS_ERROR);
}
}
}
}
if (AUTH_MECHS_AUTH_SRC_EXTERNAL.has(this.mechanism)) {
if (this.source != null && this.source !== '$external') {
// TODO(NODE-3485): Replace this with a MongoAuthValidationError
throw new MongoAPIError(
`Invalid source '${this.source}' for mechanism '${this.mechanism}' specified.`
);
}
}
if (this.mechanism === AuthMechanism.MONGODB_PLAIN && this.source == null) {
// TODO(NODE-3485): Replace this with a MongoAuthValidationError
throw new MongoAPIError('PLAIN Authentication Mechanism needs an auth source');
}
if (this.mechanism === AuthMechanism.MONGODB_X509 && this.password != null) {
if (this.password === '') {
Reflect.set(this, 'password', undefined);
return;
}
// TODO(NODE-3485): Replace this with a MongoAuthValidationError
throw new MongoAPIError(`Password not allowed for mechanism MONGODB-X509`);
}
const canonicalization = this.mechanismProperties.CANONICALIZE_HOST_NAME ?? false;
if (!Object.values(GSSAPICanonicalizationValue).includes(canonicalization)) {
throw new MongoAPIError(`Invalid CANONICALIZE_HOST_NAME value: ${canonicalization}`);
}
}
static merge(
creds: MongoCredentials | undefined,
options: Partial<MongoCredentialsOptions>
): MongoCredentials {
return new MongoCredentials({
username: options.username ?? creds?.username ?? '',
password: options.password ?? creds?.password ?? '',
mechanism: options.mechanism ?? creds?.mechanism ?? AuthMechanism.MONGODB_DEFAULT,
mechanismProperties: options.mechanismProperties ?? creds?.mechanismProperties ?? {},
source: options.source ?? options.db ?? creds?.source ?? 'admin'
});
}
}

View file

@@ -0,0 +1,195 @@
import type { Binary, BSONSerializeOptions } from '../../bson';
import * as BSON from '../../bson';
import { aws4 } from '../../deps';
import {
MongoCompatibilityError,
MongoMissingCredentialsError,
MongoRuntimeError
} from '../../error';
import { ByteUtils, maxWireVersion, ns, randomBytes } from '../../utils';
import { type AuthContext, AuthProvider } from './auth_provider';
import {
type AWSCredentialProvider,
AWSSDKCredentialProvider,
type AWSTempCredentials,
AWSTemporaryCredentialProvider,
LegacyAWSTemporaryCredentialProvider
} from './aws_temporary_credentials';
import { MongoCredentials } from './mongo_credentials';
import { AuthMechanism } from './providers';
const ASCII_N = 110;
const bsonOptions: BSONSerializeOptions = {
useBigInt64: false,
promoteLongs: true,
promoteValues: true,
promoteBuffers: false,
bsonRegExp: false
};
interface AWSSaslContinuePayload {
a: string;
d: string;
t?: string;
}
export class MongoDBAWS extends AuthProvider {
private credentialFetcher: AWSTemporaryCredentialProvider;
private credentialProvider?: AWSCredentialProvider;
constructor(credentialProvider?: AWSCredentialProvider) {
super();
this.credentialProvider = credentialProvider;
this.credentialFetcher = AWSTemporaryCredentialProvider.isAWSSDKInstalled
? new AWSSDKCredentialProvider(credentialProvider)
: new LegacyAWSTemporaryCredentialProvider();
}
override async auth(authContext: AuthContext): Promise<void> {
const { connection } = authContext;
if (!authContext.credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
if ('kModuleError' in aws4) {
throw aws4['kModuleError'];
}
const { sign } = aws4;
if (maxWireVersion(connection) < 9) {
throw new MongoCompatibilityError(
'MONGODB-AWS authentication requires MongoDB version 4.4 or later'
);
}
if (!authContext.credentials.username) {
authContext.credentials = await makeTempCredentials(
authContext.credentials,
this.credentialFetcher
);
}
const { credentials } = authContext;
const accessKeyId = credentials.username;
const secretAccessKey = credentials.password;
// Allow the user to specify an AWS session token for authentication with temporary credentials.
const sessionToken = credentials.mechanismProperties.AWS_SESSION_TOKEN;
// If all three defined, include sessionToken, else include username and pass, else no credentials
const awsCredentials =
accessKeyId && secretAccessKey && sessionToken
? { accessKeyId, secretAccessKey, sessionToken }
: accessKeyId && secretAccessKey
? { accessKeyId, secretAccessKey }
: undefined;
const db = credentials.source;
const nonce = await randomBytes(32);
// All messages between MongoDB clients and servers are sent as BSON objects
// in the payload field of saslStart and saslContinue.
const saslStart = {
saslStart: 1,
mechanism: 'MONGODB-AWS',
payload: BSON.serialize({ r: nonce, p: ASCII_N }, bsonOptions)
};
const saslStartResponse = await connection.command(ns(`${db}.$cmd`), saslStart, undefined);
const serverResponse = BSON.deserialize(saslStartResponse.payload.buffer, bsonOptions) as {
s: Binary;
h: string;
};
const host = serverResponse.h;
const serverNonce = serverResponse.s.buffer;
if (serverNonce.length !== 64) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Invalid server nonce length ${serverNonce.length}, expected 64`);
}
if (!ByteUtils.equals(serverNonce.subarray(0, nonce.byteLength), nonce)) {
// throw because the serverNonce's leading 32 bytes must equal the client nonce's 32 bytes
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#conversation-5
// TODO(NODE-3483)
throw new MongoRuntimeError('Server nonce does not begin with client nonce');
}
if (host.length < 1 || host.length > 255 || host.indexOf('..') !== -1) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Server returned an invalid host: "${host}"`);
}
const body = 'Action=GetCallerIdentity&Version=2011-06-15';
const options = sign(
{
method: 'POST',
host,
region: deriveRegion(serverResponse.h),
service: 'sts',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': body.length,
'X-MongoDB-Server-Nonce': ByteUtils.toBase64(serverNonce),
'X-MongoDB-GS2-CB-Flag': 'n'
},
path: '/',
body
},
awsCredentials
);
const payload: AWSSaslContinuePayload = {
a: options.headers.Authorization,
d: options.headers['X-Amz-Date']
};
if (sessionToken) {
payload.t = sessionToken;
}
const saslContinue = {
saslContinue: 1,
conversationId: saslStartResponse.conversationId,
payload: BSON.serialize(payload, bsonOptions)
};
await connection.command(ns(`${db}.$cmd`), saslContinue, undefined);
}
}
async function makeTempCredentials(
credentials: MongoCredentials,
awsCredentialFetcher: AWSTemporaryCredentialProvider
): Promise<MongoCredentials> {
function makeMongoCredentialsFromAWSTemp(creds: AWSTempCredentials) {
// The AWS session token (creds.Token) may or may not be set.
if (!creds.AccessKeyId || !creds.SecretAccessKey) {
throw new MongoMissingCredentialsError('Could not obtain temporary MONGODB-AWS credentials');
}
return new MongoCredentials({
username: creds.AccessKeyId,
password: creds.SecretAccessKey,
source: credentials.source,
mechanism: AuthMechanism.MONGODB_AWS,
mechanismProperties: {
AWS_SESSION_TOKEN: creds.Token
}
});
}
const temporaryCredentials = await awsCredentialFetcher.getCredentials();
return makeMongoCredentialsFromAWSTemp(temporaryCredentials);
}
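/**
 * Derives the STS region from the host returned by the server. For example (illustrative
 * values), 'sts.amazonaws.com' maps to the default 'us-east-1', while
 * 'sts.eu-west-1.amazonaws.com' maps to 'eu-west-1'.
 */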
function deriveRegion(host: string) {
const parts = host.split('.');
if (parts.length === 1 || parts[1] === 'amazonaws') {
return 'us-east-1';
}
return parts[1];
}

View file

@@ -0,0 +1,180 @@
import type { Document } from '../../bson';
import { MongoInvalidArgumentError, MongoMissingCredentialsError } from '../../error';
import type { HandshakeDocument } from '../connect';
import type { Connection } from '../connection';
import { type AuthContext, AuthProvider } from './auth_provider';
import type { MongoCredentials } from './mongo_credentials';
import { AzureMachineWorkflow } from './mongodb_oidc/azure_machine_workflow';
import { GCPMachineWorkflow } from './mongodb_oidc/gcp_machine_workflow';
import { K8SMachineWorkflow } from './mongodb_oidc/k8s_machine_workflow';
import { TokenCache } from './mongodb_oidc/token_cache';
import { TokenMachineWorkflow } from './mongodb_oidc/token_machine_workflow';
/** Error when credentials are missing. */
const MISSING_CREDENTIALS_ERROR = 'AuthContext must provide credentials.';
/**
* The information returned by the server on the IDP server.
* @public
*/
export interface IdPInfo {
/**
* A URL which describes the Authentication Server. This identifier should
* be the iss of provided access tokens, and be viable for RFC8414 metadata
* discovery and RFC9207 identification.
*/
issuer: string;
/** A unique client ID for this OIDC client. */
clientId: string;
/** A list of additional scopes to request from IdP. */
requestScopes?: string[];
}
/**
* The response from the IdP server with the access token and
* optional expiration time and refresh token.
* @public
*/
export interface IdPServerResponse {
/** The OIDC access token. */
accessToken: string;
/** The time when the access token expires. For future use. */
expiresInSeconds?: number;
/** The refresh token, if applicable, to be used by the callback to request a new token from the issuer. */
refreshToken?: string;
}
/**
* The response required to be returned from the machine or
* human callback workflows' callback.
* @public
*/
export interface OIDCResponse {
/** The OIDC access token. */
accessToken: string;
/** The time when the access token expires. For future use. */
expiresInSeconds?: number;
/** The refresh token, if applicable, to be used by the callback to request a new token from the issuer. */
refreshToken?: string;
}
/**
* The parameters that the driver provides to the user supplied
* human or machine callback.
*
* The version number is used to communicate callback API changes that are not breaking but that
* users may want to know about and review their implementation. Users may wish to check the version
* number and throw an error if their expected version number and the one provided do not match.
* @public
*/
export interface OIDCCallbackParams {
/** Optional username. */
username?: string;
/** The context in which to timeout the OIDC callback. */
timeoutContext: AbortSignal;
/** The current OIDC API version. */
version: 1;
/** The IdP information returned from the server. */
idpInfo?: IdPInfo;
/** The refresh token, if applicable, to be used by the callback to request a new token from the issuer. */
refreshToken?: string;
}
/**
* The signature of the human or machine callback functions.
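 *
 * A minimal machine-callback sketch (here `fetchTokenSomehow` is a placeholder for however
 * your environment obtains an OIDC access token):
 * ```ts
 * const machineCallback: OIDCCallbackFunction = async (params: OIDCCallbackParams) => {
 *   const accessToken = await fetchTokenSomehow(params.username);
 *   return { accessToken };
 * };
 * ```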
* @public
*/
export type OIDCCallbackFunction = (params: OIDCCallbackParams) => Promise<OIDCResponse>;
/** The current version of OIDC implementation. */
export const OIDC_VERSION = 1;
type EnvironmentName = 'test' | 'azure' | 'gcp' | 'k8s' | undefined;
/** @internal */
export interface Workflow {
/**
* All device workflows must implement this method in order to get the access
* token and then call authenticate with it.
*/
execute(
connection: Connection,
credentials: MongoCredentials,
response?: Document
): Promise<void>;
/**
* Each workflow should specify the correct custom behaviour for reauthentication.
*/
reauthenticate(connection: Connection, credentials: MongoCredentials): Promise<void>;
/**
* Get the document to add for speculative authentication.
*/
speculativeAuth(connection: Connection, credentials: MongoCredentials): Promise<Document>;
}
/** @internal */
export const OIDC_WORKFLOWS: Map<EnvironmentName, () => Workflow> = new Map();
OIDC_WORKFLOWS.set('test', () => new TokenMachineWorkflow(new TokenCache()));
OIDC_WORKFLOWS.set('azure', () => new AzureMachineWorkflow(new TokenCache()));
OIDC_WORKFLOWS.set('gcp', () => new GCPMachineWorkflow(new TokenCache()));
OIDC_WORKFLOWS.set('k8s', () => new K8SMachineWorkflow(new TokenCache()));
/**
* OIDC auth provider.
*/
export class MongoDBOIDC extends AuthProvider {
workflow: Workflow;
/**
* Instantiate the auth provider.
*/
constructor(workflow?: Workflow) {
super();
if (!workflow) {
throw new MongoInvalidArgumentError('No workflow provided to the OIDC auth provider.');
}
this.workflow = workflow;
}
/**
* Authenticate using OIDC
*/
override async auth(authContext: AuthContext): Promise<void> {
const { connection, reauthenticating, response } = authContext;
if (response?.speculativeAuthenticate?.done && !reauthenticating) {
return;
}
const credentials = getCredentials(authContext);
if (reauthenticating) {
await this.workflow.reauthenticate(connection, credentials);
} else {
await this.workflow.execute(connection, credentials, response);
}
}
/**
* Add the speculative auth for the initial handshake.
*/
override async prepare(
handshakeDoc: HandshakeDocument,
authContext: AuthContext
): Promise<HandshakeDocument> {
const { connection } = authContext;
const credentials = getCredentials(authContext);
const result = await this.workflow.speculativeAuth(connection, credentials);
return { ...handshakeDoc, ...result };
}
}
/**
* Get credentials from the auth context, throwing if they do not exist.
*/
function getCredentials(authContext: AuthContext): MongoCredentials {
const { credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError(MISSING_CREDENTIALS_ERROR);
}
return credentials;
}

View file

@@ -0,0 +1,82 @@
import { MONGODB_ERROR_CODES, MongoError, MongoOIDCError } from '../../../error';
import { Timeout, TimeoutError } from '../../../timeout';
import { type Connection } from '../../connection';
import { type MongoCredentials } from '../mongo_credentials';
import {
OIDC_VERSION,
type OIDCCallbackFunction,
type OIDCCallbackParams,
type OIDCResponse
} from '../mongodb_oidc';
import { AUTOMATED_TIMEOUT_MS, CallbackWorkflow } from './callback_workflow';
import { type TokenCache } from './token_cache';
/**
 * Class implementing behaviour for the non-human (automated) callback workflow.
* @internal
*/
export class AutomatedCallbackWorkflow extends CallbackWorkflow {
/**
   * Instantiate the automated callback workflow.
*/
constructor(cache: TokenCache, callback: OIDCCallbackFunction) {
super(cache, callback);
}
/**
* Execute the OIDC callback workflow.
*/
async execute(connection: Connection, credentials: MongoCredentials): Promise<void> {
// If there is a cached access token, try to authenticate with it. If
// authentication fails with an Authentication error (18),
// invalidate the access token, fetch a new access token, and try
// to authenticate again.
// If the server fails for any other reason, do not clear the cache.
if (this.cache.hasAccessToken) {
const token = this.cache.getAccessToken();
try {
return await this.finishAuthentication(connection, credentials, token);
} catch (error) {
if (
error instanceof MongoError &&
error.code === MONGODB_ERROR_CODES.AuthenticationFailed
) {
this.cache.removeAccessToken();
return await this.execute(connection, credentials);
} else {
throw error;
}
}
}
const response = await this.fetchAccessToken(credentials);
this.cache.put(response);
connection.accessToken = response.accessToken;
await this.finishAuthentication(connection, credentials, response.accessToken);
}
/**
* Fetches the access token using the callback.
*/
protected async fetchAccessToken(credentials: MongoCredentials): Promise<OIDCResponse> {
const controller = new AbortController();
const params: OIDCCallbackParams = {
timeoutContext: controller.signal,
version: OIDC_VERSION
};
if (credentials.username) {
params.username = credentials.username;
}
const timeout = Timeout.expires(AUTOMATED_TIMEOUT_MS);
try {
return await Promise.race([this.executeAndValidateCallback(params), timeout]);
} catch (error) {
if (TimeoutError.is(error)) {
controller.abort();
throw new MongoOIDCError(`OIDC callback timed out after ${AUTOMATED_TIMEOUT_MS}ms.`);
}
throw error;
} finally {
timeout.clear();
}
}
}

View file

@@ -0,0 +1,85 @@
import { addAzureParams, AZURE_BASE_URL } from '../../../client-side-encryption/providers/azure';
import { MongoAzureError } from '../../../error';
import { get } from '../../../utils';
import type { MongoCredentials } from '../mongo_credentials';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** Azure request headers. */
const AZURE_HEADERS = Object.freeze({ Metadata: 'true', Accept: 'application/json' });
/** Invalid endpoint result error. */
const ENDPOINT_RESULT_ERROR =
'Azure endpoint did not return a value with only access_token and expires_in properties';
/** Error for when the token audience is missing in the environment. */
const TOKEN_RESOURCE_MISSING_ERROR =
'TOKEN_RESOURCE must be set in the auth mechanism properties when ENVIRONMENT is azure.';
/**
* Device workflow implementation for Azure.
*
* @internal
*/
export class AzureMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(credentials?: MongoCredentials): Promise<AccessToken> {
const tokenAudience = credentials?.mechanismProperties.TOKEN_RESOURCE;
const username = credentials?.username;
if (!tokenAudience) {
throw new MongoAzureError(TOKEN_RESOURCE_MISSING_ERROR);
}
const response = await getAzureTokenData(tokenAudience, username);
if (!isEndpointResultValid(response)) {
throw new MongoAzureError(ENDPOINT_RESULT_ERROR);
}
return response;
}
}
/**
* Hit the Azure endpoint to get the token data.
*/
async function getAzureTokenData(tokenAudience: string, username?: string): Promise<AccessToken> {
const url = new URL(AZURE_BASE_URL);
addAzureParams(url, tokenAudience, username);
const response = await get(url, {
headers: AZURE_HEADERS
});
if (response.status !== 200) {
throw new MongoAzureError(
`Status code ${response.status} returned from the Azure endpoint. Response body: ${response.body}`
);
}
const result = JSON.parse(response.body);
return {
access_token: result.access_token,
expires_in: Number(result.expires_in)
};
}
/**
* Determines if a result returned from the endpoint is valid.
* This means the result is not nullish, contains the access_token required field
* and the expires_in required field.
*/
function isEndpointResultValid(
token: unknown
): token is { access_token: unknown; expires_in: unknown } {
if (token == null || typeof token !== 'object') return false;
return (
'access_token' in token &&
typeof token.access_token === 'string' &&
'expires_in' in token &&
typeof token.expires_in === 'number'
);
}

View file

@@ -0,0 +1,188 @@
import { setTimeout } from 'timers/promises';
import { type Document } from '../../../bson';
import { MongoMissingCredentialsError } from '../../../error';
import { ns } from '../../../utils';
import type { Connection } from '../../connection';
import type { MongoCredentials } from '../mongo_credentials';
import {
type OIDCCallbackFunction,
type OIDCCallbackParams,
type OIDCResponse,
type Workflow
} from '../mongodb_oidc';
import { finishCommandDocument, startCommandDocument } from './command_builders';
import { type TokenCache } from './token_cache';
/** 5 minutes in milliseconds */
export const HUMAN_TIMEOUT_MS = 300000;
/** 1 minute in milliseconds */
export const AUTOMATED_TIMEOUT_MS = 60000;
/** Properties allowed on results of callbacks. */
const RESULT_PROPERTIES = ['accessToken', 'expiresInSeconds', 'refreshToken'];
/** Error message when the callback result is invalid. */
const CALLBACK_RESULT_ERROR =
'User provided OIDC callbacks must return a valid object with an accessToken.';
/** The time to throttle callback calls. */
const THROTTLE_MS = 100;
/**
* OIDC implementation of a callback based workflow.
* @internal
*/
export abstract class CallbackWorkflow implements Workflow {
cache: TokenCache;
callback: OIDCCallbackFunction;
lastExecutionTime: number;
/**
* Instantiate the callback workflow.
*/
constructor(cache: TokenCache, callback: OIDCCallbackFunction) {
this.cache = cache;
this.callback = this.withLock(callback);
this.lastExecutionTime = Date.now() - THROTTLE_MS;
}
/**
* Get the document to add for speculative authentication. This also needs
* to add a db field from the credentials source.
*/
async speculativeAuth(connection: Connection, credentials: MongoCredentials): Promise<Document> {
// Check if the Client Cache has an access token.
// If it does, cache the access token in the Connection Cache and send a JwtStepRequest
// with the cached access token in the speculative authentication SASL payload.
if (this.cache.hasAccessToken) {
const accessToken = this.cache.getAccessToken();
connection.accessToken = accessToken;
const document = finishCommandDocument(accessToken);
document.db = credentials.source;
return { speculativeAuthenticate: document };
}
return {};
}
/**
   * Reauthenticate the callback workflow. For this we invalidate the access token
* in the cache and run the authentication steps again. No initial handshake needs
* to be sent.
*/
async reauthenticate(connection: Connection, credentials: MongoCredentials): Promise<void> {
if (this.cache.hasAccessToken) {
// Reauthentication implies the token has expired.
if (connection.accessToken === this.cache.getAccessToken()) {
// If connection's access token is the same as the cache's, remove
// the token from the cache and connection.
this.cache.removeAccessToken();
delete connection.accessToken;
} else {
// If the connection's access token is different from the cache's, set
// the cache's token on the connection and do not remove from the
// cache.
connection.accessToken = this.cache.getAccessToken();
}
}
await this.execute(connection, credentials);
}
/**
* Execute the OIDC callback workflow.
*/
abstract execute(
connection: Connection,
credentials: MongoCredentials,
response?: Document
): Promise<void>;
/**
* Starts the callback authentication process. If there is a speculative
* authentication document from the initial handshake, then we will use that
* value to get the issuer, otherwise we will send the saslStart command.
*/
protected async startAuthentication(
connection: Connection,
credentials: MongoCredentials,
response?: Document
): Promise<Document> {
let result;
if (response?.speculativeAuthenticate) {
result = response.speculativeAuthenticate;
} else {
result = await connection.command(
ns(credentials.source),
startCommandDocument(credentials),
undefined
);
}
return result;
}
/**
* Finishes the callback authentication process.
*/
protected async finishAuthentication(
connection: Connection,
credentials: MongoCredentials,
token: string,
conversationId?: number
): Promise<void> {
await connection.command(
ns(credentials.source),
finishCommandDocument(token, conversationId),
undefined
);
}
/**
* Executes the callback and validates the output.
*/
protected async executeAndValidateCallback(params: OIDCCallbackParams): Promise<OIDCResponse> {
const result = await this.callback(params);
// Validate that the result returned by the callback is acceptable. If it is not
// we must clear the token result from the cache.
if (isCallbackResultInvalid(result)) {
throw new MongoMissingCredentialsError(CALLBACK_RESULT_ERROR);
}
return result;
}
/**
   * Ensure only one callback executes at a time and throttle calls so they happen
   * at most once every 100ms.
*/
protected withLock(callback: OIDCCallbackFunction): OIDCCallbackFunction {
let lock: Promise<any> = Promise.resolve();
return async (params: OIDCCallbackParams): Promise<OIDCResponse> => {
      // We do this to ensure that we never return the result of a previous
      // lock; only the current callback's value gets returned.
await lock;
lock = lock
.catch(() => null)
.then(async () => {
const difference = Date.now() - this.lastExecutionTime;
if (difference <= THROTTLE_MS) {
await setTimeout(THROTTLE_MS - difference, { signal: params.timeoutContext });
}
this.lastExecutionTime = Date.now();
return await callback(params);
});
return await lock;
};
}
}
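// Note on CallbackWorkflow.withLock above (illustrative description, not a guarantee from
// the OIDC spec): concurrent invocations share the same promise chain, so a second callback
// call starts only after the previous one settles and, because of the throttle, at least
// THROTTLE_MS after the previous call began.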
/**
* Determines if a result returned from a request or refresh callback
 * function is invalid. This means the result is nullish, doesn't contain the
 * required accessToken field, or contains fields other than accessToken,
 * expiresInSeconds, and refreshToken.
*/
function isCallbackResultInvalid(tokenResult: unknown): boolean {
if (tokenResult == null || typeof tokenResult !== 'object') return true;
if (!('accessToken' in tokenResult)) return true;
return !Object.getOwnPropertyNames(tokenResult).every(prop => RESULT_PROPERTIES.includes(prop));
}

View file

@ -0,0 +1,53 @@
import { Binary, BSON, type Document } from '../../../bson';
import { type MongoCredentials } from '../mongo_credentials';
import { AuthMechanism } from '../providers';
/** @internal */
export interface OIDCCommand {
saslStart?: number;
saslContinue?: number;
conversationId?: number;
mechanism?: string;
autoAuthorize?: number;
db?: string;
payload: Binary;
}
/**
* Generate the finishing command document for authentication. Will be a
* saslStart or saslContinue depending on the presence of a conversation id.
*/
export function finishCommandDocument(token: string, conversationId?: number): OIDCCommand {
if (conversationId != null) {
return {
saslContinue: 1,
conversationId: conversationId,
payload: new Binary(BSON.serialize({ jwt: token }))
};
}
  // saslContinue requires a conversationId in the command to be valid, so in this
  // case the server allows "step two" to actually be a saslStart with the token
  // as the jwt, since the use of the cached value has no correlating conversation
  // on the particular connection.
return {
saslStart: 1,
mechanism: AuthMechanism.MONGODB_OIDC,
payload: new Binary(BSON.serialize({ jwt: token }))
};
}
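// Illustrative shapes of the generated commands (token and conversationId are hypothetical):
//   finishCommandDocument('<jwt>')    -> { saslStart: 1, mechanism: 'MONGODB-OIDC', payload: new Binary(BSON.serialize({ jwt: '<jwt>' })) }
//   finishCommandDocument('<jwt>', 3) -> { saslContinue: 1, conversationId: 3, payload: new Binary(BSON.serialize({ jwt: '<jwt>' })) }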
/**
* Generate the saslStart command document.
*/
export function startCommandDocument(credentials: MongoCredentials): OIDCCommand {
const payload: Document = {};
if (credentials.username) {
payload.n = credentials.username;
}
return {
saslStart: 1,
autoAuthorize: 1,
mechanism: AuthMechanism.MONGODB_OIDC,
payload: new Binary(BSON.serialize(payload))
};
}
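// Illustrative (hypothetical username): startCommandDocument for a user 'app@corp' yields
//   { saslStart: 1, autoAuthorize: 1, mechanism: 'MONGODB-OIDC', payload: new Binary(BSON.serialize({ n: 'app@corp' })) }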

View file

@ -0,0 +1,53 @@
import { MongoGCPError } from '../../../error';
import { get } from '../../../utils';
import { type MongoCredentials } from '../mongo_credentials';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** GCP base URL. */
const GCP_BASE_URL =
'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity';
/** GCP request headers. */
const GCP_HEADERS = Object.freeze({ 'Metadata-Flavor': 'Google' });
/** Error for when the token audience is missing in the environment. */
const TOKEN_RESOURCE_MISSING_ERROR =
'TOKEN_RESOURCE must be set in the auth mechanism properties when ENVIRONMENT is gcp.';
export class GCPMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(credentials?: MongoCredentials): Promise<AccessToken> {
const tokenAudience = credentials?.mechanismProperties.TOKEN_RESOURCE;
if (!tokenAudience) {
throw new MongoGCPError(TOKEN_RESOURCE_MISSING_ERROR);
}
return await getGcpTokenData(tokenAudience);
}
}
/**
* Hit the GCP endpoint to get the token data.
*/
async function getGcpTokenData(tokenAudience: string): Promise<AccessToken> {
const url = new URL(GCP_BASE_URL);
url.searchParams.append('audience', tokenAudience);
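  // Resulting request URL (the audience value is illustrative; it comes from the
  // TOKEN_RESOURCE mechanism property):
  //   http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=<TOKEN_RESOURCE>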
const response = await get(url, {
headers: GCP_HEADERS
});
if (response.status !== 200) {
throw new MongoGCPError(
`Status code ${response.status} returned from the GCP endpoint. Response body: ${response.body}`
);
}
return { access_token: response.body };
}

View file

@ -0,0 +1,141 @@
import { BSON } from '../../../bson';
import { MONGODB_ERROR_CODES, MongoError, MongoOIDCError } from '../../../error';
import { Timeout, TimeoutError } from '../../../timeout';
import { type Connection } from '../../connection';
import { type MongoCredentials } from '../mongo_credentials';
import {
type IdPInfo,
OIDC_VERSION,
type OIDCCallbackFunction,
type OIDCCallbackParams,
type OIDCResponse
} from '../mongodb_oidc';
import { CallbackWorkflow, HUMAN_TIMEOUT_MS } from './callback_workflow';
import { type TokenCache } from './token_cache';
/**
 * Class implementing behaviour for the human callback workflow.
* @internal
*/
export class HumanCallbackWorkflow extends CallbackWorkflow {
/**
* Instantiate the human callback workflow.
*/
constructor(cache: TokenCache, callback: OIDCCallbackFunction) {
super(cache, callback);
}
/**
* Execute the OIDC human callback workflow.
*/
async execute(connection: Connection, credentials: MongoCredentials): Promise<void> {
// Check if the Client Cache has an access token.
// If it does, cache the access token in the Connection Cache and perform a One-Step SASL conversation
// using the access token. If the server returns an Authentication error (18),
    // invalidate the access token in the Client Cache, clear the Connection Cache,
// and restart the authentication flow. Raise any other errors to the user. On success, exit the algorithm.
if (this.cache.hasAccessToken) {
const token = this.cache.getAccessToken();
connection.accessToken = token;
try {
return await this.finishAuthentication(connection, credentials, token);
} catch (error) {
if (
error instanceof MongoError &&
error.code === MONGODB_ERROR_CODES.AuthenticationFailed
) {
this.cache.removeAccessToken();
delete connection.accessToken;
return await this.execute(connection, credentials);
} else {
throw error;
}
}
}
// Check if the Client Cache has a refresh token.
// If it does, call the OIDC Human Callback with the cached refresh token and IdpInfo to get a
// new access token. Cache the new access token in the Client Cache and Connection Cache.
    // Perform a One-Step SASL conversation using the new access token. If the server returns
// an Authentication error (18), clear the refresh token, invalidate the access token from the
// Client Cache, clear the Connection Cache, and restart the authentication flow. Raise any other
// errors to the user. On success, exit the algorithm.
if (this.cache.hasRefreshToken) {
const refreshToken = this.cache.getRefreshToken();
const result = await this.fetchAccessToken(
this.cache.getIdpInfo(),
credentials,
refreshToken
);
this.cache.put(result);
connection.accessToken = result.accessToken;
try {
return await this.finishAuthentication(connection, credentials, result.accessToken);
} catch (error) {
if (
error instanceof MongoError &&
error.code === MONGODB_ERROR_CODES.AuthenticationFailed
) {
this.cache.removeRefreshToken();
delete connection.accessToken;
return await this.execute(connection, credentials);
} else {
throw error;
}
}
}
// Start a new Two-Step SASL conversation.
// Run a PrincipalStepRequest to get the IdpInfo.
// Call the OIDC Human Callback with the new IdpInfo to get a new access token and optional refresh
// token. Drivers MUST NOT pass a cached refresh token to the callback when performing
// a new Two-Step conversation. Cache the new IdpInfo and refresh token in the Client Cache and the
// new access token in the Client Cache and Connection Cache.
// Attempt to authenticate using a JwtStepRequest with the new access token. Raise any errors to the user.
const startResponse = await this.startAuthentication(connection, credentials);
const conversationId = startResponse.conversationId;
const idpInfo = BSON.deserialize(startResponse.payload.buffer) as IdPInfo;
const callbackResponse = await this.fetchAccessToken(idpInfo, credentials);
this.cache.put(callbackResponse, idpInfo);
connection.accessToken = callbackResponse.accessToken;
return await this.finishAuthentication(
connection,
credentials,
callbackResponse.accessToken,
conversationId
);
}
/**
* Fetches an access token using the callback.
*/
private async fetchAccessToken(
idpInfo: IdPInfo,
credentials: MongoCredentials,
refreshToken?: string
): Promise<OIDCResponse> {
const controller = new AbortController();
const params: OIDCCallbackParams = {
timeoutContext: controller.signal,
version: OIDC_VERSION,
idpInfo: idpInfo
};
if (credentials.username) {
params.username = credentials.username;
}
if (refreshToken) {
params.refreshToken = refreshToken;
}
const timeout = Timeout.expires(HUMAN_TIMEOUT_MS);
try {
return await Promise.race([this.executeAndValidateCallback(params), timeout]);
} catch (error) {
if (TimeoutError.is(error)) {
controller.abort();
throw new MongoOIDCError(`OIDC callback timed out after ${HUMAN_TIMEOUT_MS}ms.`);
}
throw error;
} finally {
timeout.clear();
}
}
}

View file

@ -0,0 +1,38 @@
import { readFile } from 'fs/promises';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** The fallback file name */
const FALLBACK_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/token';
/** The Azure environment variable for the file name. */
const AZURE_FILENAME = 'AZURE_FEDERATED_TOKEN_FILE';
/** The AWS environment variable for the file name. */
const AWS_FILENAME = 'AWS_WEB_IDENTITY_TOKEN_FILE';
export class K8SMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(): Promise<AccessToken> {
let filename: string;
if (process.env[AZURE_FILENAME]) {
filename = process.env[AZURE_FILENAME];
} else if (process.env[AWS_FILENAME]) {
filename = process.env[AWS_FILENAME];
} else {
filename = FALLBACK_FILENAME;
}
const token = await readFile(filename, 'utf8');
return { access_token: token };
}
}

View file

@ -0,0 +1,142 @@
import { setTimeout } from 'timers/promises';
import { type Document } from '../../../bson';
import { ns } from '../../../utils';
import type { Connection } from '../../connection';
import type { MongoCredentials } from '../mongo_credentials';
import type { Workflow } from '../mongodb_oidc';
import { finishCommandDocument } from './command_builders';
import { type TokenCache } from './token_cache';
/** The time to throttle callback calls. */
const THROTTLE_MS = 100;
/**
* The access token format.
* @internal
*/
export interface AccessToken {
access_token: string;
expires_in?: number;
}
/** @internal */
export type OIDCTokenFunction = (credentials: MongoCredentials) => Promise<AccessToken>;
/**
* Common behaviour for OIDC machine workflows.
* @internal
*/
export abstract class MachineWorkflow implements Workflow {
cache: TokenCache;
callback: OIDCTokenFunction;
lastExecutionTime: number;
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
this.cache = cache;
this.callback = this.withLock(this.getToken.bind(this));
this.lastExecutionTime = Date.now() - THROTTLE_MS;
}
/**
* Execute the workflow. Gets the token from the subclass implementation.
*/
async execute(connection: Connection, credentials: MongoCredentials): Promise<void> {
const token = await this.getTokenFromCacheOrEnv(connection, credentials);
const command = finishCommandDocument(token);
await connection.command(ns(credentials.source), command, undefined);
}
/**
   * Reauthentication on a machine workflow just grabs the token again, since the
   * server has indicated that the current access token is invalid or expired.
*/
async reauthenticate(connection: Connection, credentials: MongoCredentials): Promise<void> {
if (this.cache.hasAccessToken) {
// Reauthentication implies the token has expired.
if (connection.accessToken === this.cache.getAccessToken()) {
// If connection's access token is the same as the cache's, remove
// the token from the cache and connection.
this.cache.removeAccessToken();
delete connection.accessToken;
} else {
// If the connection's access token is different from the cache's, set
// the cache's token on the connection and do not remove from the
// cache.
connection.accessToken = this.cache.getAccessToken();
}
}
await this.execute(connection, credentials);
}
/**
* Get the document to add for speculative authentication.
*/
async speculativeAuth(connection: Connection, credentials: MongoCredentials): Promise<Document> {
// The spec states only cached access tokens can use speculative auth.
if (!this.cache.hasAccessToken) {
return {};
}
const token = await this.getTokenFromCacheOrEnv(connection, credentials);
const document = finishCommandDocument(token);
document.db = credentials.source;
return { speculativeAuthenticate: document };
}
/**
* Get the token from the cache or environment.
*/
private async getTokenFromCacheOrEnv(
connection: Connection,
credentials: MongoCredentials
): Promise<string> {
if (this.cache.hasAccessToken) {
const token = this.cache.getAccessToken();
      // New connections won't have an access token, so ensure we set it here.
if (!connection.accessToken) {
connection.accessToken = token;
}
return token;
} else {
const token = await this.callback(credentials);
this.cache.put({ accessToken: token.access_token, expiresInSeconds: token.expires_in });
// Put the access token on the connection as well.
connection.accessToken = token.access_token;
return token.access_token;
}
}
/**
   * Ensure only one callback executes at a time, throttled to at most one
   * call every 100ms.
*/
private withLock(callback: OIDCTokenFunction): OIDCTokenFunction {
let lock: Promise<any> = Promise.resolve();
return async (credentials: MongoCredentials): Promise<AccessToken> => {
      // We do this to ensure that we never return the result of a previous
      // lock; only the current callback's value gets returned.
await lock;
lock = lock
.catch(() => null)
.then(async () => {
const difference = Date.now() - this.lastExecutionTime;
if (difference <= THROTTLE_MS) {
await setTimeout(THROTTLE_MS - difference);
}
this.lastExecutionTime = Date.now();
return await callback(credentials);
});
return await lock;
};
}
/**
* Get the token from the environment or endpoint.
*/
abstract getToken(credentials: MongoCredentials): Promise<AccessToken>;
}

View file

@ -0,0 +1,62 @@
import { MongoDriverError } from '../../../error';
import type { IdPInfo, OIDCResponse } from '../mongodb_oidc';
class MongoOIDCError extends MongoDriverError {}
/** @internal */
export class TokenCache {
private accessToken?: string;
private refreshToken?: string;
private idpInfo?: IdPInfo;
private expiresInSeconds?: number;
get hasAccessToken(): boolean {
return !!this.accessToken;
}
get hasRefreshToken(): boolean {
return !!this.refreshToken;
}
get hasIdpInfo(): boolean {
return !!this.idpInfo;
}
getAccessToken(): string {
if (!this.accessToken) {
throw new MongoOIDCError('Attempted to get an access token when none exists.');
}
return this.accessToken;
}
getRefreshToken(): string {
if (!this.refreshToken) {
throw new MongoOIDCError('Attempted to get a refresh token when none exists.');
}
return this.refreshToken;
}
getIdpInfo(): IdPInfo {
if (!this.idpInfo) {
throw new MongoOIDCError('Attempted to get IDP information when none exists.');
}
return this.idpInfo;
}
put(response: OIDCResponse, idpInfo?: IdPInfo) {
this.accessToken = response.accessToken;
this.refreshToken = response.refreshToken;
this.expiresInSeconds = response.expiresInSeconds;
if (idpInfo) {
this.idpInfo = idpInfo;
}
}
removeAccessToken() {
this.accessToken = undefined;
}
removeRefreshToken() {
this.refreshToken = undefined;
}
}

View file

@ -0,0 +1,34 @@
import * as fs from 'fs';
import { MongoAWSError } from '../../../error';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** Error for when the token is missing in the environment. */
const TOKEN_MISSING_ERROR = 'OIDC_TOKEN_FILE must be set in the environment.';
/**
* Device workflow implementation for AWS.
*
* @internal
*/
export class TokenMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(): Promise<AccessToken> {
const tokenFile = process.env.OIDC_TOKEN_FILE;
if (!tokenFile) {
throw new MongoAWSError(TOKEN_MISSING_ERROR);
}
const token = await fs.promises.readFile(tokenFile, 'utf8');
return { access_token: token };
}
}

View file

@ -0,0 +1,25 @@
import { Binary } from '../../bson';
import { MongoMissingCredentialsError } from '../../error';
import { ns } from '../../utils';
import { type AuthContext, AuthProvider } from './auth_provider';
export class Plain extends AuthProvider {
override async auth(authContext: AuthContext): Promise<void> {
const { connection, credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
const { username, password } = credentials;
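    // SASL PLAIN message (RFC 4616): [authzid] NUL authcid NUL passwd; the leading
    // \x00 sends an empty authorization identity.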
const payload = new Binary(Buffer.from(`\x00${username}\x00${password}`));
const command = {
saslStart: 1,
mechanism: 'PLAIN',
payload: payload,
autoAuthorize: 1
};
await connection.command(ns('$external.$cmd'), command, undefined);
}
}

View file

@ -0,0 +1,23 @@
/** @public */
export const AuthMechanism = Object.freeze({
MONGODB_AWS: 'MONGODB-AWS',
MONGODB_CR: 'MONGODB-CR',
MONGODB_DEFAULT: 'DEFAULT',
MONGODB_GSSAPI: 'GSSAPI',
MONGODB_PLAIN: 'PLAIN',
MONGODB_SCRAM_SHA1: 'SCRAM-SHA-1',
MONGODB_SCRAM_SHA256: 'SCRAM-SHA-256',
MONGODB_X509: 'MONGODB-X509',
MONGODB_OIDC: 'MONGODB-OIDC'
} as const);
/** @public */
export type AuthMechanism = (typeof AuthMechanism)[keyof typeof AuthMechanism];
/** @internal */
export const AUTH_MECHS_AUTH_SRC_EXTERNAL = new Set<AuthMechanism>([
AuthMechanism.MONGODB_GSSAPI,
AuthMechanism.MONGODB_AWS,
AuthMechanism.MONGODB_OIDC,
AuthMechanism.MONGODB_X509
]);

View file

@ -0,0 +1,344 @@
import { saslprep } from '@mongodb-js/saslprep';
import * as crypto from 'crypto';
import { Binary, type Document } from '../../bson';
import {
MongoInvalidArgumentError,
MongoMissingCredentialsError,
MongoRuntimeError
} from '../../error';
import { ns, randomBytes } from '../../utils';
import type { HandshakeDocument } from '../connect';
import { type AuthContext, AuthProvider } from './auth_provider';
import type { MongoCredentials } from './mongo_credentials';
import { AuthMechanism } from './providers';
type CryptoMethod = 'sha1' | 'sha256';
class ScramSHA extends AuthProvider {
cryptoMethod: CryptoMethod;
constructor(cryptoMethod: CryptoMethod) {
super();
this.cryptoMethod = cryptoMethod || 'sha1';
}
override async prepare(
handshakeDoc: HandshakeDocument,
authContext: AuthContext
): Promise<HandshakeDocument> {
const cryptoMethod = this.cryptoMethod;
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
const nonce = await randomBytes(24);
// store the nonce for later use
authContext.nonce = nonce;
const request = {
...handshakeDoc,
speculativeAuthenticate: {
...makeFirstMessage(cryptoMethod, credentials, nonce),
db: credentials.source
}
};
return request;
}
override async auth(authContext: AuthContext) {
const { reauthenticating, response } = authContext;
if (response?.speculativeAuthenticate && !reauthenticating) {
return await continueScramConversation(
this.cryptoMethod,
response.speculativeAuthenticate,
authContext
);
}
return await executeScram(this.cryptoMethod, authContext);
}
}
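// Escapes '=' and ',' per the RFC 5802 saslname encoding ('=' -> '=3D', ',' -> '=2C').
// Note that String#replace with a string pattern only rewrites the first occurrence.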
function cleanUsername(username: string) {
return username.replace('=', '=3D').replace(',', '=2C');
}
function clientFirstMessageBare(username: string, nonce: Buffer) {
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
// Since the username is not sasl-prep-d, we need to do this here.
return Buffer.concat([
Buffer.from('n=', 'utf8'),
Buffer.from(username, 'utf8'),
Buffer.from(',r=', 'utf8'),
Buffer.from(nonce.toString('base64'), 'utf8')
]);
}
function makeFirstMessage(
cryptoMethod: CryptoMethod,
credentials: MongoCredentials,
nonce: Buffer
) {
const username = cleanUsername(credentials.username);
const mechanism =
cryptoMethod === 'sha1' ? AuthMechanism.MONGODB_SCRAM_SHA1 : AuthMechanism.MONGODB_SCRAM_SHA256;
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
// Since the username is not sasl-prep-d, we need to do this here.
return {
saslStart: 1,
mechanism,
payload: new Binary(
Buffer.concat([Buffer.from('n,,', 'utf8'), clientFirstMessageBare(username, nonce)])
),
autoAuthorize: 1,
options: { skipEmptyExchange: true }
};
}
async function executeScram(cryptoMethod: CryptoMethod, authContext: AuthContext): Promise<void> {
const { connection, credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
if (!authContext.nonce) {
throw new MongoInvalidArgumentError('AuthContext must contain a valid nonce property');
}
const nonce = authContext.nonce;
const db = credentials.source;
const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, nonce);
const response = await connection.command(ns(`${db}.$cmd`), saslStartCmd, undefined);
await continueScramConversation(cryptoMethod, response, authContext);
}
async function continueScramConversation(
cryptoMethod: CryptoMethod,
response: Document,
authContext: AuthContext
): Promise<void> {
const connection = authContext.connection;
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
if (!authContext.nonce) {
throw new MongoInvalidArgumentError('Unable to continue SCRAM without valid nonce');
}
const nonce = authContext.nonce;
const db = credentials.source;
const username = cleanUsername(credentials.username);
const password = credentials.password;
const processedPassword =
cryptoMethod === 'sha256' ? saslprep(password) : passwordDigest(username, password);
const payload: Binary = Buffer.isBuffer(response.payload)
? new Binary(response.payload)
: response.payload;
const dict = parsePayload(payload);
const iterations = parseInt(dict.i, 10);
if (iterations && iterations < 4096) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Server returned an invalid iteration count ${iterations}`);
}
const salt = dict.s;
const rnonce = dict.r;
if (rnonce.startsWith('nonce')) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Server returned an invalid nonce: ${rnonce}`);
}
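  // For reference, the RFC 5802 derivation mirrored below:
  //   SaltedPassword  := Hi(password, salt, i)
  //   ClientKey       := HMAC(SaltedPassword, "Client Key")
  //   StoredKey       := H(ClientKey)
  //   ClientSignature := HMAC(StoredKey, AuthMessage)
  //   ClientProof     := ClientKey XOR ClientSignature
  //   ServerKey       := HMAC(SaltedPassword, "Server Key")
  //   ServerSignature := HMAC(ServerKey, AuthMessage)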
// Set up start of proof
const withoutProof = `c=biws,r=${rnonce}`;
const saltedPassword = HI(
processedPassword,
Buffer.from(salt, 'base64'),
iterations,
cryptoMethod
);
const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key');
const serverKey = HMAC(cryptoMethod, saltedPassword, 'Server Key');
const storedKey = H(cryptoMethod, clientKey);
const authMessage = [
clientFirstMessageBare(username, nonce),
payload.toString('utf8'),
withoutProof
].join(',');
const clientSignature = HMAC(cryptoMethod, storedKey, authMessage);
const clientProof = `p=${xor(clientKey, clientSignature)}`;
const clientFinal = [withoutProof, clientProof].join(',');
const serverSignature = HMAC(cryptoMethod, serverKey, authMessage);
const saslContinueCmd = {
saslContinue: 1,
conversationId: response.conversationId,
payload: new Binary(Buffer.from(clientFinal))
};
const r = await connection.command(ns(`${db}.$cmd`), saslContinueCmd, undefined);
const parsedResponse = parsePayload(r.payload);
if (!compareDigest(Buffer.from(parsedResponse.v, 'base64'), serverSignature)) {
throw new MongoRuntimeError('Server returned an invalid signature');
}
if (r.done !== false) {
// If the server sends r.done === true we can save one RTT
return;
}
const retrySaslContinueCmd = {
saslContinue: 1,
conversationId: r.conversationId,
payload: Buffer.alloc(0)
};
await connection.command(ns(`${db}.$cmd`), retrySaslContinueCmd, undefined);
}
function parsePayload(payload: Binary) {
const payloadStr = payload.toString('utf8');
const dict: Document = {};
const parts = payloadStr.split(',');
for (let i = 0; i < parts.length; i++) {
const valueParts = (parts[i].match(/^([^=]*)=(.*)$/) ?? []).slice(1);
dict[valueParts[0]] = valueParts[1];
}
return dict;
}
function passwordDigest(username: string, password: string) {
if (typeof username !== 'string') {
throw new MongoInvalidArgumentError('Username must be a string');
}
if (typeof password !== 'string') {
throw new MongoInvalidArgumentError('Password must be a string');
}
if (password.length === 0) {
throw new MongoInvalidArgumentError('Password cannot be empty');
}
let md5: crypto.Hash;
try {
md5 = crypto.createHash('md5');
} catch (err) {
if (crypto.getFips()) {
// This error is (slightly) more helpful than what comes from OpenSSL directly, e.g.
// 'Error: error:060800C8:digital envelope routines:EVP_DigestInit_ex:disabled for FIPS'
throw new Error('Auth mechanism SCRAM-SHA-1 is not supported in FIPS mode');
}
throw err;
}
md5.update(`${username}:mongo:${password}`, 'utf8');
return md5.digest('hex');
}
// XOR two buffers
function xor(a: Buffer, b: Buffer) {
if (!Buffer.isBuffer(a)) {
a = Buffer.from(a);
}
if (!Buffer.isBuffer(b)) {
b = Buffer.from(b);
}
const length = Math.max(a.length, b.length);
const res = [];
for (let i = 0; i < length; i += 1) {
res.push(a[i] ^ b[i]);
}
return Buffer.from(res).toString('base64');
}
function H(method: CryptoMethod, text: Buffer) {
return crypto.createHash(method).update(text).digest();
}
function HMAC(method: CryptoMethod, key: Buffer, text: Buffer | string) {
return crypto.createHmac(method, key).update(text).digest();
}
interface HICache {
[key: string]: Buffer;
}
let _hiCache: HICache = {};
let _hiCacheCount = 0;
function _hiCachePurge() {
_hiCache = {};
_hiCacheCount = 0;
}
const hiLengthMap = {
sha256: 32,
sha1: 20
};
function HI(data: string, salt: Buffer, iterations: number, cryptoMethod: CryptoMethod) {
// omit the work if already generated
const key = [data, salt.toString('base64'), iterations].join('_');
if (_hiCache[key] != null) {
return _hiCache[key];
}
  // derive the salted password via PBKDF2
const saltedData = crypto.pbkdf2Sync(
data,
salt,
iterations,
hiLengthMap[cryptoMethod],
cryptoMethod
);
// cache a copy to speed up the next lookup, but prevent unbounded cache growth
if (_hiCacheCount >= 200) {
_hiCachePurge();
}
_hiCache[key] = saltedData;
_hiCacheCount += 1;
return saltedData;
}
function compareDigest(lhs: Buffer, rhs: Uint8Array) {
if (lhs.length !== rhs.length) {
return false;
}
if (typeof crypto.timingSafeEqual === 'function') {
return crypto.timingSafeEqual(lhs, rhs);
}
let result = 0;
for (let i = 0; i < lhs.length; i++) {
result |= lhs[i] ^ rhs[i];
}
return result === 0;
}
export class ScramSHA1 extends ScramSHA {
constructor() {
super('sha1');
}
}
export class ScramSHA256 extends ScramSHA {
constructor() {
super('sha256');
}
}

View file

@ -0,0 +1,43 @@
import type { Document } from '../../bson';
import { MongoMissingCredentialsError } from '../../error';
import { ns } from '../../utils';
import type { HandshakeDocument } from '../connect';
import { type AuthContext, AuthProvider } from './auth_provider';
import type { MongoCredentials } from './mongo_credentials';
export class X509 extends AuthProvider {
override async prepare(
handshakeDoc: HandshakeDocument,
authContext: AuthContext
): Promise<HandshakeDocument> {
const { credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
return { ...handshakeDoc, speculativeAuthenticate: x509AuthenticateCommand(credentials) };
}
override async auth(authContext: AuthContext) {
const connection = authContext.connection;
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
const response = authContext.response;
if (response?.speculativeAuthenticate) {
return;
}
await connection.command(ns('$external.$cmd'), x509AuthenticateCommand(credentials), undefined);
}
}
function x509AuthenticateCommand(credentials: MongoCredentials) {
const command: Document = { authenticate: 1, mechanism: 'MONGODB-X509' };
if (credentials.username) {
command.user = credentials.username;
}
return command;
}

View file

@ -0,0 +1,312 @@
import { type Document, type ObjectId } from '../bson';
import {
COMMAND_FAILED,
COMMAND_STARTED,
COMMAND_SUCCEEDED,
LEGACY_HELLO_COMMAND,
LEGACY_HELLO_COMMAND_CAMEL_CASE
} from '../constants';
import { calculateDurationInMs } from '../utils';
import {
DocumentSequence,
OpMsgRequest,
type OpQueryRequest,
type WriteProtocolMessageType
} from './commands';
import type { Connection } from './connection';
/**
* An event indicating the start of a given command
* @public
* @category Event
*/
export class CommandStartedEvent {
commandObj?: Document;
requestId: number;
databaseName: string;
commandName: string;
command: Document;
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the connection id and is returned by the hello or legacy hello response as "connectionId"
* from the server on 4.2+.
*/
serverConnectionId: bigint | null;
serviceId?: ObjectId;
/** @internal */
name = COMMAND_STARTED;
/**
* Create a started event
*
* @internal
   * @param connection - the connection that originated the command
* @param command - the command
*/
constructor(
connection: Connection,
command: WriteProtocolMessageType,
serverConnectionId: bigint | null
) {
const cmd = extractCommand(command);
const commandName = extractCommandName(cmd);
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
// TODO: remove in major revision, this is not spec behavior
if (SENSITIVE_COMMANDS.has(commandName)) {
this.commandObj = {};
this.commandObj[commandName] = true;
}
this.address = address;
this.connectionId = connectionId;
this.serviceId = serviceId;
this.requestId = command.requestId;
this.databaseName = command.databaseName;
this.commandName = commandName;
this.command = maybeRedact(commandName, cmd, cmd);
this.serverConnectionId = serverConnectionId;
}
/* @internal */
get hasServiceId(): boolean {
return !!this.serviceId;
}
}
/**
* An event indicating the success of a given command
* @public
* @category Event
*/
export class CommandSucceededEvent {
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the connection id and is returned by the hello or legacy hello response as "connectionId" from the server on 4.2+.
*/
serverConnectionId: bigint | null;
requestId: number;
duration: number;
commandName: string;
reply: unknown;
serviceId?: ObjectId;
/** @internal */
name = COMMAND_SUCCEEDED;
/**
* Create a succeeded event
*
* @internal
   * @param connection - the connection that originated the command
* @param command - the command
* @param reply - the reply for this command from the server
* @param started - a high resolution tuple timestamp of when the command was first sent, to calculate duration
*/
constructor(
connection: Connection,
command: WriteProtocolMessageType,
reply: Document | undefined,
started: number,
serverConnectionId: bigint | null
) {
const cmd = extractCommand(command);
const commandName = extractCommandName(cmd);
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
this.address = address;
this.connectionId = connectionId;
this.serviceId = serviceId;
this.requestId = command.requestId;
this.commandName = commandName;
this.duration = calculateDurationInMs(started);
this.reply = maybeRedact(commandName, cmd, extractReply(reply));
this.serverConnectionId = serverConnectionId;
}
/* @internal */
get hasServiceId(): boolean {
return !!this.serviceId;
}
}
/**
* An event indicating the failure of a given command
* @public
* @category Event
*/
export class CommandFailedEvent {
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the connection id and is returned by the hello or legacy hello response as "connectionId" from the server on 4.2+.
*/
serverConnectionId: bigint | null;
requestId: number;
duration: number;
commandName: string;
failure: Error;
serviceId?: ObjectId;
/** @internal */
name = COMMAND_FAILED;
/**
* Create a failure event
*
* @internal
   * @param connection - the connection that originated the command
* @param command - the command
* @param error - the generated error or a server error response
* @param started - a high resolution tuple timestamp of when the command was first sent, to calculate duration
*/
constructor(
connection: Connection,
command: WriteProtocolMessageType,
error: Error | Document,
started: number,
serverConnectionId: bigint | null
) {
const cmd = extractCommand(command);
const commandName = extractCommandName(cmd);
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
this.address = address;
this.connectionId = connectionId;
this.serviceId = serviceId;
this.requestId = command.requestId;
this.commandName = commandName;
this.duration = calculateDurationInMs(started);
this.failure = maybeRedact(commandName, cmd, error) as Error;
this.serverConnectionId = serverConnectionId;
}
/* @internal */
get hasServiceId(): boolean {
return !!this.serviceId;
}
}
/**
* Commands that we want to redact because of the sensitive nature of their contents
* @internal
*/
export const SENSITIVE_COMMANDS = new Set([
'authenticate',
'saslStart',
'saslContinue',
'getnonce',
'createUser',
'updateUser',
'copydbgetnonce',
'copydbsaslstart',
'copydb'
]);
const HELLO_COMMANDS = new Set(['hello', LEGACY_HELLO_COMMAND, LEGACY_HELLO_COMMAND_CAMEL_CASE]);
// helper methods
const extractCommandName = (commandDoc: Document) => Object.keys(commandDoc)[0];
const collectionName = (command: OpQueryRequest) => command.ns.split('.')[1];
const maybeRedact = (commandName: string, commandDoc: Document, result: Error | Document) =>
SENSITIVE_COMMANDS.has(commandName) ||
(HELLO_COMMANDS.has(commandName) && commandDoc.speculativeAuthenticate)
? {}
: result;
const LEGACY_FIND_QUERY_MAP: { [key: string]: string } = {
$query: 'filter',
$orderby: 'sort',
$hint: 'hint',
$comment: 'comment',
$maxScan: 'maxScan',
$max: 'max',
$min: 'min',
$returnKey: 'returnKey',
$showDiskLoc: 'showRecordId',
$maxTimeMS: 'maxTimeMS',
$snapshot: 'snapshot'
};
const LEGACY_FIND_OPTIONS_MAP = {
numberToSkip: 'skip',
numberToReturn: 'batchSize',
returnFieldSelector: 'projection'
} as const;
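// Illustrative up-conversion (hypothetical values): a legacy query against 'db.coll' such as
//   { query: { $query: { x: 1 }, $orderby: { y: -1 } }, numberToReturn: 10 }
// is reported to monitoring as { find: 'coll', filter: { x: 1 }, sort: { y: -1 }, batchSize: 10 }.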
/** Extract the actual command from the query, possibly up-converting if it's a legacy format */
function extractCommand(command: WriteProtocolMessageType): Document {
if (command instanceof OpMsgRequest) {
const cmd = { ...command.command };
// For OP_MSG with payload type 1 we need to pull the documents
// array out of the document sequence for monitoring.
if (cmd.ops instanceof DocumentSequence) {
cmd.ops = cmd.ops.documents;
}
if (cmd.nsInfo instanceof DocumentSequence) {
cmd.nsInfo = cmd.nsInfo.documents;
}
return cmd;
}
if (command.query?.$query) {
let result: Document;
if (command.ns === 'admin.$cmd') {
// up-convert legacy command
result = Object.assign({}, command.query.$query);
} else {
// up-convert legacy find command
result = { find: collectionName(command) };
Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => {
if (command.query[key] != null) {
result[LEGACY_FIND_QUERY_MAP[key]] = { ...command.query[key] };
}
});
}
Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
const legacyKey = key as keyof typeof LEGACY_FIND_OPTIONS_MAP;
if (command[legacyKey] != null) {
result[LEGACY_FIND_OPTIONS_MAP[legacyKey]] = command[legacyKey];
}
});
return result;
}
let clonedQuery: Record<string, unknown> = {};
const clonedCommand: Record<string, unknown> = { ...command };
if (command.query) {
clonedQuery = { ...command.query };
clonedCommand.query = clonedQuery;
}
return command.query ? clonedQuery : clonedCommand;
}
function extractReply(reply?: Document) {
if (!reply) {
return reply;
}
return reply.result ? reply.result : reply;
}
function extractConnectionDetails(connection: Connection) {
let connectionId;
if ('id' in connection) {
connectionId = connection.id;
}
return {
address: connection.address,
serviceId: connection.serviceId,
connectionId
};
}

View file

@ -0,0 +1,773 @@
import type { BSONSerializeOptions, Document, Long } from '../bson';
import * as BSON from '../bson';
import { MongoInvalidArgumentError, MongoRuntimeError } from '../error';
import { type ReadPreference } from '../read_preference';
import type { ClientSession } from '../sessions';
import type { CommandOptions } from './connection';
import {
compress,
Compressor,
type CompressorName,
uncompressibleCommands
} from './wire_protocol/compression';
import { OP_COMPRESSED, OP_MSG, OP_QUERY } from './wire_protocol/constants';
// Incrementing request id
let _requestId = 0;
// Query flags
const OPTS_TAILABLE_CURSOR = 2;
const OPTS_SECONDARY = 4;
const OPTS_OPLOG_REPLAY = 8;
const OPTS_NO_CURSOR_TIMEOUT = 16;
const OPTS_AWAIT_DATA = 32;
const OPTS_EXHAUST = 64;
const OPTS_PARTIAL = 128;
// Response flags
const CURSOR_NOT_FOUND = 1;
const QUERY_FAILURE = 2;
const SHARD_CONFIG_STALE = 4;
const AWAIT_CAPABLE = 8;
const encodeUTF8Into = BSON.BSON.onDemand.ByteUtils.encodeUTF8Into;
/** @internal */
export type WriteProtocolMessageType = OpQueryRequest | OpMsgRequest;
/** @internal */
export interface OpQueryOptions extends CommandOptions {
socketTimeoutMS?: number;
session?: ClientSession;
numberToSkip?: number;
numberToReturn?: number;
returnFieldSelector?: Document;
pre32Limit?: number;
serializeFunctions?: boolean;
ignoreUndefined?: boolean;
maxBsonSize?: number;
checkKeys?: boolean;
secondaryOk?: boolean;
requestId?: number;
moreToCome?: boolean;
exhaustAllowed?: boolean;
}
/** @internal */
export class OpQueryRequest {
ns: string;
numberToSkip: number;
numberToReturn: number;
returnFieldSelector?: Document;
requestId: number;
pre32Limit?: number;
serializeFunctions: boolean;
ignoreUndefined: boolean;
maxBsonSize: number;
checkKeys: boolean;
batchSize: number;
tailable: boolean;
secondaryOk: boolean;
oplogReplay: boolean;
noCursorTimeout: boolean;
awaitData: boolean;
exhaust: boolean;
partial: boolean;
/** moreToCome is an OP_MSG only concept */
moreToCome = false;
constructor(
public databaseName: string,
public query: Document,
options: OpQueryOptions
) {
// Basic options needed to be passed in
// TODO(NODE-3483): Replace with MongoCommandError
const ns = `${databaseName}.$cmd`;
if (typeof databaseName !== 'string') {
throw new MongoRuntimeError('Database name must be a string for a query');
}
// TODO(NODE-3483): Replace with MongoCommandError
if (query == null) throw new MongoRuntimeError('A query document must be specified for query');
// Validate that we are not passing 0x00 in the collection name
if (ns.indexOf('\x00') !== -1) {
// TODO(NODE-3483): Use MongoNamespace static method
throw new MongoRuntimeError('Namespace cannot contain a null character');
}
// Basic options
this.ns = ns;
// Additional options
this.numberToSkip = options.numberToSkip || 0;
this.numberToReturn = options.numberToReturn || 0;
this.returnFieldSelector = options.returnFieldSelector || undefined;
this.requestId = options.requestId ?? OpQueryRequest.getRequestId();
// special case for pre-3.2 find commands, delete ASAP
this.pre32Limit = options.pre32Limit;
// Serialization option
this.serializeFunctions =
typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined =
typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
this.batchSize = this.numberToReturn;
// Flags
this.tailable = false;
this.secondaryOk = typeof options.secondaryOk === 'boolean' ? options.secondaryOk : false;
this.oplogReplay = false;
this.noCursorTimeout = false;
this.awaitData = false;
this.exhaust = false;
this.partial = false;
}
/** Assign next request Id. */
incRequestId(): void {
this.requestId = _requestId++;
}
/** Peek next request Id. */
nextRequestId(): number {
return _requestId + 1;
}
/** Increment then return next request Id. */
static getRequestId(): number {
return ++_requestId;
}
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
toBin(): Uint8Array[] {
const buffers = [];
let projection = null;
// Set up the flags
let flags = 0;
if (this.tailable) {
flags |= OPTS_TAILABLE_CURSOR;
}
if (this.secondaryOk) {
flags |= OPTS_SECONDARY;
}
if (this.oplogReplay) {
flags |= OPTS_OPLOG_REPLAY;
}
if (this.noCursorTimeout) {
flags |= OPTS_NO_CURSOR_TIMEOUT;
}
if (this.awaitData) {
flags |= OPTS_AWAIT_DATA;
}
if (this.exhaust) {
flags |= OPTS_EXHAUST;
}
if (this.partial) {
flags |= OPTS_PARTIAL;
}
    // If batchSize is different from this.numberToReturn, sync numberToReturn to it
if (this.batchSize !== this.numberToReturn) this.numberToReturn = this.batchSize;
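    // OP_QUERY wire layout written below (per the MongoDB wire protocol):
    //   [16-byte MsgHeader][int32 flags][cstring fullCollectionName][int32 numberToSkip]
    //   [int32 numberToReturn][query BSON][optional returnFieldsSelector BSON]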
// Allocate write protocol header buffer
const header = Buffer.alloc(
4 * 4 + // Header
4 + // Flags
Buffer.byteLength(this.ns) +
1 + // namespace
4 + // numberToSkip
4 // numberToReturn
);
// Add header to buffers
buffers.push(header);
// Serialize the query
const query = BSON.serialize(this.query, {
checkKeys: this.checkKeys,
serializeFunctions: this.serializeFunctions,
ignoreUndefined: this.ignoreUndefined
});
// Add query document
buffers.push(query);
if (this.returnFieldSelector && Object.keys(this.returnFieldSelector).length > 0) {
// Serialize the projection document
projection = BSON.serialize(this.returnFieldSelector, {
checkKeys: this.checkKeys,
serializeFunctions: this.serializeFunctions,
ignoreUndefined: this.ignoreUndefined
});
// Add projection document
buffers.push(projection);
}
// Total message size
const totalLength = header.length + query.length + (projection ? projection.length : 0);
// Set up the index
let index = 4;
// Write total document length
header[3] = (totalLength >> 24) & 0xff;
header[2] = (totalLength >> 16) & 0xff;
header[1] = (totalLength >> 8) & 0xff;
header[0] = totalLength & 0xff;
// Write header information requestId
header[index + 3] = (this.requestId >> 24) & 0xff;
header[index + 2] = (this.requestId >> 16) & 0xff;
header[index + 1] = (this.requestId >> 8) & 0xff;
header[index] = this.requestId & 0xff;
index = index + 4;
// Write header information responseTo
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = 0 & 0xff;
index = index + 4;
// Write header information OP_QUERY
header[index + 3] = (OP_QUERY >> 24) & 0xff;
header[index + 2] = (OP_QUERY >> 16) & 0xff;
header[index + 1] = (OP_QUERY >> 8) & 0xff;
header[index] = OP_QUERY & 0xff;
index = index + 4;
// Write header information flags
header[index + 3] = (flags >> 24) & 0xff;
header[index + 2] = (flags >> 16) & 0xff;
header[index + 1] = (flags >> 8) & 0xff;
header[index] = flags & 0xff;
index = index + 4;
// Write collection name
index = index + header.write(this.ns, index, 'utf8') + 1;
header[index - 1] = 0;
// Write header information flags numberToSkip
header[index + 3] = (this.numberToSkip >> 24) & 0xff;
header[index + 2] = (this.numberToSkip >> 16) & 0xff;
header[index + 1] = (this.numberToSkip >> 8) & 0xff;
header[index] = this.numberToSkip & 0xff;
index = index + 4;
// Write header information flags numberToReturn
header[index + 3] = (this.numberToReturn >> 24) & 0xff;
header[index + 2] = (this.numberToReturn >> 16) & 0xff;
header[index + 1] = (this.numberToReturn >> 8) & 0xff;
header[index] = this.numberToReturn & 0xff;
index = index + 4;
// Return the buffers
return buffers;
}
}
/** @internal */
export interface MessageHeader {
length: number;
requestId: number;
responseTo: number;
opCode: number;
fromCompressed?: boolean;
}
/** @internal */
export class OpReply {
parsed: boolean;
raw: Buffer;
data: Buffer;
opts: BSONSerializeOptions;
length: number;
requestId: number;
responseTo: number;
opCode: number;
fromCompressed?: boolean;
responseFlags?: number;
cursorId?: Long;
startingFrom?: number;
numberReturned?: number;
cursorNotFound?: boolean;
queryFailure?: boolean;
shardConfigStale?: boolean;
awaitCapable?: boolean;
useBigInt64: boolean;
promoteLongs: boolean;
promoteValues: boolean;
promoteBuffers: boolean;
bsonRegExp?: boolean;
index = 0;
sections: Uint8Array[] = [];
/** moreToCome is an OP_MSG only concept */
moreToCome = false;
constructor(
message: Buffer,
msgHeader: MessageHeader,
msgBody: Buffer,
opts?: BSONSerializeOptions
) {
this.parsed = false;
this.raw = message;
this.data = msgBody;
this.opts = opts ?? {
useBigInt64: false,
promoteLongs: true,
promoteValues: true,
promoteBuffers: false,
bsonRegExp: false
};
// Read the message header
this.length = msgHeader.length;
this.requestId = msgHeader.requestId;
this.responseTo = msgHeader.responseTo;
this.opCode = msgHeader.opCode;
this.fromCompressed = msgHeader.fromCompressed;
// Flag values
this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
this.promoteValues =
typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
this.promoteBuffers =
typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
}
isParsed(): boolean {
return this.parsed;
}
parse(): Uint8Array {
// Don't parse again if not needed
if (this.parsed) return this.sections[0];
// Position within OP_REPLY at which documents start
// (See https://www.mongodb.com/docs/manual/reference/mongodb-wire-protocol/#wire-op-reply)
this.index = 20;
// Read the message body
this.responseFlags = this.data.readInt32LE(0);
this.cursorId = new BSON.Long(this.data.readInt32LE(4), this.data.readInt32LE(8));
this.startingFrom = this.data.readInt32LE(12);
this.numberReturned = this.data.readInt32LE(16);
if (this.numberReturned < 0 || this.numberReturned > 2 ** 32 - 1) {
throw new RangeError(
`OP_REPLY numberReturned is an invalid array length ${this.numberReturned}`
);
}
this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0;
this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0;
this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0;
this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0;
// Parse Body
for (let i = 0; i < this.numberReturned; i++) {
const bsonSize =
this.data[this.index] |
(this.data[this.index + 1] << 8) |
(this.data[this.index + 2] << 16) |
(this.data[this.index + 3] << 24);
const section = this.data.subarray(this.index, this.index + bsonSize);
this.sections.push(section);
// Adjust the index
this.index = this.index + bsonSize;
}
// Set parsed
this.parsed = true;
return this.sections[0];
}
}
// Msg Flags
const OPTS_CHECKSUM_PRESENT = 1;
const OPTS_MORE_TO_COME = 2;
const OPTS_EXHAUST_ALLOWED = 1 << 16;
/** @internal */
export interface OpMsgOptions {
socketTimeoutMS?: number;
session?: ClientSession;
numberToSkip?: number;
numberToReturn?: number;
returnFieldSelector?: Document;
pre32Limit?: number;
serializeFunctions?: boolean;
ignoreUndefined?: boolean;
maxBsonSize?: number;
checkKeys?: boolean;
secondaryOk?: boolean;
requestId?: number;
moreToCome?: boolean;
exhaustAllowed?: boolean;
readPreference: ReadPreference;
}
/** @internal */
export class DocumentSequence {
field: string;
documents: Document[];
serializedDocumentsLength: number;
private chunks: Uint8Array[];
private header: Buffer;
/**
* Create a new document sequence for the provided field.
* @param field - The field it will replace.
*/
constructor(field: string, documents?: Document[]) {
this.field = field;
this.documents = [];
this.chunks = [];
this.serializedDocumentsLength = 0;
    // A document sequence section starts with payload type 1 in the first byte.
    // Field strings must always be UTF-8.
    const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
    buffer[0] = 1;
    // Bytes 1-4 hold the little-endian section size (written in push()); the
    // field name follows at offset 5 with a trailing null byte.
    encodeUTF8Into(buffer, `${this.field}\0`, 5);
this.chunks.push(buffer);
this.header = buffer;
if (documents) {
for (const doc of documents) {
this.push(doc, BSON.serialize(doc));
}
}
}
/**
* Push a document to the document sequence. Will serialize the document
* as well and return the current serialized length of all documents.
* @param document - The document to add.
* @param buffer - The serialized document in raw BSON.
* @returns The new total document sequence length.
*/
push(document: Document, buffer: Uint8Array): number {
this.serializedDocumentsLength += buffer.length;
// Push the document.
this.documents.push(document);
// Push the document raw bson.
this.chunks.push(buffer);
// Write the new length.
this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
return this.serializedDocumentsLength + this.header.length;
}
/**
* Get the fully serialized bytes for the document sequence section.
* @returns The section bytes.
*/
toBin(): Uint8Array {
return Buffer.concat(this.chunks);
}
}
/** @internal */
export class OpMsgRequest {
requestId: number;
serializeFunctions: boolean;
ignoreUndefined: boolean;
checkKeys: boolean;
maxBsonSize: number;
checksumPresent: boolean;
moreToCome: boolean;
exhaustAllowed: boolean;
constructor(
public databaseName: string,
public command: Document,
public options: OpQueryOptions
) {
// Basic options needed to be passed in
if (command == null)
throw new MongoInvalidArgumentError('Query document must be specified for query');
// Basic options
this.command.$db = databaseName;
// Ensure empty options
this.options = options ?? {};
// Additional options
this.requestId = options.requestId ? options.requestId : OpMsgRequest.getRequestId();
// Serialization option
this.serializeFunctions =
typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined =
typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
// flags
this.checksumPresent = false;
this.moreToCome = options.moreToCome ?? command.writeConcern?.w === 0;
this.exhaustAllowed =
typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;
}
toBin(): Buffer[] {
const buffers: Buffer[] = [];
let flags = 0;
if (this.checksumPresent) {
flags |= OPTS_CHECKSUM_PRESENT;
}
if (this.moreToCome) {
flags |= OPTS_MORE_TO_COME;
}
if (this.exhaustAllowed) {
flags |= OPTS_EXHAUST_ALLOWED;
}
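    // OP_MSG wire layout written below: [16-byte MsgHeader][uint32 flagBits]
    // [kind-0 section: 1-byte kind + command BSON][zero or more kind-1 document
    // sequence sections][optional CRC-32C checksum, not written here].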
const header = Buffer.alloc(
4 * 4 + // Header
4 // Flags
);
buffers.push(header);
let totalLength = header.length;
const command = this.command;
totalLength += this.makeSections(buffers, command);
header.writeInt32LE(totalLength, 0); // messageLength
header.writeInt32LE(this.requestId, 4); // requestID
header.writeInt32LE(0, 8); // responseTo
header.writeInt32LE(OP_MSG, 12); // opCode
header.writeUInt32LE(flags, 16); // flags
return buffers;
}
/**
   * Adds the sections to the OP_MSG request's buffers and returns the total length added.
*/
makeSections(buffers: Uint8Array[], document: Document): number {
const sequencesBuffer = this.extractDocumentSequences(document);
const payloadTypeBuffer = Buffer.allocUnsafe(1);
payloadTypeBuffer[0] = 0;
const documentBuffer = this.serializeBson(document);
// First section, type 0
buffers.push(payloadTypeBuffer);
buffers.push(documentBuffer);
// Subsequent sections, type 1
buffers.push(sequencesBuffer);
return payloadTypeBuffer.length + documentBuffer.length + sequencesBuffer.length;
}
/**
* Extracts the document sequences from the command document and returns
* a buffer to be added as multiple sections after the initial type 0
* section in the message.
*/
extractDocumentSequences(document: Document): Uint8Array {
// Pull out any field in the command document that's value is a document sequence.
const chunks = [];
for (const [key, value] of Object.entries(document)) {
if (value instanceof DocumentSequence) {
chunks.push(value.toBin());
// Why are we removing the field from the command? This is because it needs to be
// removed in the OP_MSG request first section, and DocumentSequence is not a
// BSON type and is specific to the MongoDB wire protocol so there's nothing
// our BSON serializer can do about this. Since DocumentSequence is not exposed
// in the public API and only used internally, we are never mutating an original
// command provided by the user, just our own, and it's cheaper to delete from
// our own command than copying it.
delete document[key];
}
}
if (chunks.length > 0) {
return Buffer.concat(chunks);
}
    // If we have no document sequences we return an empty buffer so nothing is
    // added to the payload.
return Buffer.alloc(0);
}
serializeBson(document: Document): Uint8Array {
return BSON.serialize(document, {
checkKeys: this.checkKeys,
serializeFunctions: this.serializeFunctions,
ignoreUndefined: this.ignoreUndefined
});
}
static getRequestId(): number {
_requestId = (_requestId + 1) & 0x7fffffff;
return _requestId;
}
}
/** @internal */
export class OpMsgResponse {
parsed: boolean;
raw: Buffer;
data: Buffer;
opts: BSONSerializeOptions;
length: number;
requestId: number;
responseTo: number;
opCode: number;
fromCompressed?: boolean;
responseFlags: number;
checksumPresent: boolean;
/** Indicates the server will be sending more responses on this connection */
moreToCome: boolean;
exhaustAllowed: boolean;
useBigInt64: boolean;
promoteLongs: boolean;
promoteValues: boolean;
promoteBuffers: boolean;
bsonRegExp: boolean;
index = 0;
sections: Uint8Array[] = [];
constructor(
message: Buffer,
msgHeader: MessageHeader,
msgBody: Buffer,
opts?: BSONSerializeOptions
) {
this.parsed = false;
this.raw = message;
this.data = msgBody;
this.opts = opts ?? {
useBigInt64: false,
promoteLongs: true,
promoteValues: true,
promoteBuffers: false,
bsonRegExp: false
};
// Read the message header
this.length = msgHeader.length;
this.requestId = msgHeader.requestId;
this.responseTo = msgHeader.responseTo;
this.opCode = msgHeader.opCode;
this.fromCompressed = msgHeader.fromCompressed;
// Read response flags
this.responseFlags = msgBody.readInt32LE(0);
this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0;
this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0;
this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0;
this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
this.promoteValues =
typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
this.promoteBuffers =
typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
}
isParsed(): boolean {
return this.parsed;
}
parse(): Uint8Array {
// Don't parse again if not needed
if (this.parsed) return this.sections[0];
this.index = 4;
while (this.index < this.data.length) {
const payloadType = this.data.readUInt8(this.index++);
if (payloadType === 0) {
const bsonSize = this.data.readUInt32LE(this.index);
const bin = this.data.subarray(this.index, this.index + bsonSize);
this.sections.push(bin);
this.index += bsonSize;
} else if (payloadType === 1) {
// It was decided that no driver makes use of payload type 1
// TODO(NODE-3483): Replace with MongoDeprecationError
throw new MongoRuntimeError('OP_MSG Payload Type 1 detected unsupported protocol');
}
}
this.parsed = true;
return this.sections[0];
}
}
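// Illustrative sketch, not part of the driver source: feeding one complete OP_MSG reply
// (header included) to OpMsgResponse. The header literal simply mirrors the fields the
// constructor above reads; `wireMessage` is assumed to hold exactly one wire message.
function exampleParseOpMsgReply(wireMessage: Buffer): Document {
  const msgHeader = {
    length: wireMessage.readInt32LE(0),
    requestId: wireMessage.readInt32LE(4),
    responseTo: wireMessage.readInt32LE(8),
    opCode: wireMessage.readInt32LE(12),
    fromCompressed: false
  };
  const msgBody = wireMessage.subarray(16); // the body starts after the 16-byte header
  const response = new OpMsgResponse(wireMessage, msgHeader, msgBody);
  const section = response.parse(); // raw BSON bytes of the payload type 0 section
  return BSON.deserialize(section);
}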
const MESSAGE_HEADER_SIZE = 16;
const COMPRESSION_DETAILS_SIZE = 9; // originalOpcode + uncompressedSize, compressorID
/**
* @internal
*
* An OP_COMPRESSED request wraps either an OP_QUERY or OP_MSG message.
*/
export class OpCompressedRequest {
constructor(
private command: WriteProtocolMessageType,
private options: { zlibCompressionLevel: number; agreedCompressor: CompressorName }
) {}
// Returns true if the command contains no uncompressible command terms,
// i.e. the command is safe to compress.
static canCompress(command: WriteProtocolMessageType) {
const commandDoc = command instanceof OpMsgRequest ? command.command : command.query;
const commandName = Object.keys(commandDoc)[0];
return !uncompressibleCommands.has(commandName);
}
async toBin(): Promise<Buffer[]> {
const concatenatedOriginalCommandBuffer = Buffer.concat(this.command.toBin());
// Strip the message header; only the body is compressed
const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE);
// Extract information needed for OP_COMPRESSED from the uncompressed message
const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12);
// Compress the message body
const compressedMessage = await compress(this.options, messageToBeCompressed);
// Create the msgHeader of OP_COMPRESSED
const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE);
msgHeader.writeInt32LE(
MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length,
0
); // messageLength
msgHeader.writeInt32LE(this.command.requestId, 4); // requestID
msgHeader.writeInt32LE(0, 8); // responseTo (zero)
msgHeader.writeInt32LE(OP_COMPRESSED, 12); // opCode
// Create the compression details of OP_COMPRESSED
const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE);
compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode
compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // uncompressedSize: length of the original message body, excluding the MsgHeader
compressionDetails.writeUInt8(Compressor[this.options.agreedCompressor], 8); // compressorID
return [msgHeader, compressionDetails, compressedMessage];
}
}
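// Illustrative sketch, not part of the driver source: the three buffers returned by
// OpCompressedRequest.toBin() for an example OP_MSG command. The command and the zlib
// settings are fabricated values; 'zlib' is used because it needs no optional dependency.
async function exampleOpCompressedFrame(): Promise<void> {
  const original = new OpMsgRequest('admin', { ping: 1 }, {});
  const request = new OpCompressedRequest(original, {
    agreedCompressor: 'zlib',
    zlibCompressionLevel: 6
  });
  const [msgHeader, compressionDetails, compressedBody] = await request.toBin();
  console.log(msgHeader.readInt32LE(12) === OP_COMPRESSED); // true: outer opCode
  console.log(compressionDetails.readInt32LE(0)); // originalOpcode (OP_MSG)
  console.log(compressionDetails.readInt32LE(4)); // uncompressedSize of the original body
  console.log(compressionDetails.readUInt8(8)); // compressorID for zlib
  console.log(compressedBody.length); // zlib-deflated original body
}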

View file

@ -0,0 +1,498 @@
import type { Socket, SocketConnectOpts } from 'net';
import * as net from 'net';
import type { ConnectionOptions as TLSConnectionOpts, TLSSocket } from 'tls';
import * as tls from 'tls';
import type { Document } from '../bson';
import { LEGACY_HELLO_COMMAND } from '../constants';
import { getSocks, type SocksLib } from '../deps';
import {
MongoCompatibilityError,
MongoError,
MongoErrorLabel,
MongoInvalidArgumentError,
MongoNetworkError,
MongoNetworkTimeoutError,
MongoRuntimeError,
needsRetryableWriteLabel
} from '../error';
import { HostAddress, ns, promiseWithResolvers } from '../utils';
import { AuthContext } from './auth/auth_provider';
import { AuthMechanism } from './auth/providers';
import {
type CommandOptions,
Connection,
type ConnectionOptions,
CryptoConnection
} from './connection';
import {
MAX_SUPPORTED_SERVER_VERSION,
MAX_SUPPORTED_WIRE_VERSION,
MIN_SUPPORTED_SERVER_VERSION,
MIN_SUPPORTED_WIRE_VERSION
} from './wire_protocol/constants';
/** @public */
export type Stream = Socket | TLSSocket;
export async function connect(options: ConnectionOptions): Promise<Connection> {
let connection: Connection | null = null;
try {
const socket = await makeSocket(options);
connection = makeConnection(options, socket);
await performInitialHandshake(connection, options);
return connection;
} catch (error) {
connection?.destroy();
throw error;
}
}
export function makeConnection(options: ConnectionOptions, socket: Stream): Connection {
let ConnectionType = options.connectionType ?? Connection;
if (options.autoEncrypter) {
ConnectionType = CryptoConnection;
}
return new ConnectionType(socket, options);
}
function checkSupportedServer(hello: Document, options: ConnectionOptions) {
const maxWireVersion = Number(hello.maxWireVersion);
const minWireVersion = Number(hello.minWireVersion);
const serverVersionHighEnough =
!Number.isNaN(maxWireVersion) && maxWireVersion >= MIN_SUPPORTED_WIRE_VERSION;
const serverVersionLowEnough =
!Number.isNaN(minWireVersion) && minWireVersion <= MAX_SUPPORTED_WIRE_VERSION;
if (serverVersionHighEnough) {
if (serverVersionLowEnough) {
return null;
}
const message = `Server at ${options.hostAddress} reports minimum wire version ${JSON.stringify(
hello.minWireVersion
)}, but this version of the Node.js Driver requires at most ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
return new MongoCompatibilityError(message);
}
const message = `Server at ${options.hostAddress} reports maximum wire version ${
JSON.stringify(hello.maxWireVersion) ?? 0
}, but this version of the Node.js Driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION})`;
return new MongoCompatibilityError(message);
}
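// Illustrative sketch, not part of the driver source: how the wire-version gate above behaves
// for fabricated hello documents. Only options.hostAddress is consulted, hence the narrow cast;
// the second call assumes the driver's minimum supported wire version is above 2.
function exampleWireVersionGate(): void {
  const options = {
    hostAddress: HostAddress.fromHostPort('localhost', 27017)
  } as ConnectionOptions;
  // Overlapping version ranges: supported, returns null.
  console.log(checkSupportedServer({ minWireVersion: 0, maxWireVersion: 25 }, options));
  // Server far too old: returns a MongoCompatibilityError describing the mismatch.
  console.log(checkSupportedServer({ minWireVersion: 0, maxWireVersion: 2 }, options));
}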
export async function performInitialHandshake(
conn: Connection,
options: ConnectionOptions
): Promise<void> {
const credentials = options.credentials;
if (credentials) {
if (
!(credentials.mechanism === AuthMechanism.MONGODB_DEFAULT) &&
!options.authProviders.getOrCreateProvider(
credentials.mechanism,
credentials.mechanismProperties
)
) {
throw new MongoInvalidArgumentError(`AuthMechanism '${credentials.mechanism}' not supported`);
}
}
const authContext = new AuthContext(conn, credentials, options);
conn.authContext = authContext;
const handshakeDoc = await prepareHandshakeDocument(authContext);
// @ts-expect-error: TODO(NODE-5141): The options need to be filtered properly, Connection options differ from Command options
const handshakeOptions: CommandOptions = { ...options, raw: false };
if (typeof options.connectTimeoutMS === 'number') {
// The handshake technically is a monitoring check, so its socket timeout should be connectTimeoutMS
handshakeOptions.socketTimeoutMS = options.connectTimeoutMS;
}
const start = new Date().getTime();
const response = await executeHandshake(handshakeDoc, handshakeOptions);
if (!('isWritablePrimary' in response)) {
// Provide hello-style response document.
response.isWritablePrimary = response[LEGACY_HELLO_COMMAND];
}
if (response.helloOk) {
conn.helloOk = true;
}
const supportedServerErr = checkSupportedServer(response, options);
if (supportedServerErr) {
throw supportedServerErr;
}
if (options.loadBalanced) {
if (!response.serviceId) {
throw new MongoCompatibilityError(
'Driver attempted to initialize in load balancing mode, ' +
'but the server does not support this mode.'
);
}
}
// NOTE: This is metadata attached to the connection while porting away from
// handshake being done in the `Server` class. Likely, it should be
// relocated, or at the very least restructured.
conn.hello = response;
conn.lastHelloMS = new Date().getTime() - start;
if (!response.arbiterOnly && credentials) {
// store the response on auth context
authContext.response = response;
const resolvedCredentials = credentials.resolveAuthMechanism(response);
const provider = options.authProviders.getOrCreateProvider(
resolvedCredentials.mechanism,
resolvedCredentials.mechanismProperties
);
if (!provider) {
throw new MongoInvalidArgumentError(
`No AuthProvider for ${resolvedCredentials.mechanism} defined.`
);
}
try {
await provider.auth(authContext);
} catch (error) {
if (error instanceof MongoError) {
error.addErrorLabel(MongoErrorLabel.HandshakeError);
if (needsRetryableWriteLabel(error, response.maxWireVersion, conn.description.type)) {
error.addErrorLabel(MongoErrorLabel.RetryableWriteError);
}
}
throw error;
}
}
// Connection establishment is socket creation (tcp handshake, tls handshake, MongoDB handshake (saslStart, saslContinue))
// Once connection is established, command logging can log events (if enabled)
conn.established = true;
async function executeHandshake(handshakeDoc: Document, handshakeOptions: CommandOptions) {
try {
const handshakeResponse = await conn.command(
ns('admin.$cmd'),
handshakeDoc,
handshakeOptions
);
return handshakeResponse;
} catch (error) {
if (error instanceof MongoError) {
error.addErrorLabel(MongoErrorLabel.HandshakeError);
}
throw error;
}
}
}
/**
* HandshakeDocument used during authentication.
* @internal
*/
export interface HandshakeDocument extends Document {
/**
* @deprecated Use hello instead
*/
ismaster?: boolean;
hello?: boolean;
helloOk?: boolean;
client: Document;
compression: string[];
saslSupportedMechs?: string;
loadBalanced?: boolean;
}
/**
* @internal
*
* This function is only exposed for testing purposes.
*/
export async function prepareHandshakeDocument(
authContext: AuthContext
): Promise<HandshakeDocument> {
const options = authContext.options;
const compressors = options.compressors ? options.compressors : [];
const { serverApi } = authContext.connection;
const clientMetadata: Document = await options.extendedMetadata;
const handshakeDoc: HandshakeDocument = {
[serverApi?.version || options.loadBalanced === true ? 'hello' : LEGACY_HELLO_COMMAND]: 1,
helloOk: true,
client: clientMetadata,
compression: compressors
};
if (options.loadBalanced === true) {
handshakeDoc.loadBalanced = true;
}
const credentials = authContext.credentials;
if (credentials) {
if (credentials.mechanism === AuthMechanism.MONGODB_DEFAULT && credentials.username) {
handshakeDoc.saslSupportedMechs = `${credentials.source}.${credentials.username}`;
const provider = authContext.options.authProviders.getOrCreateProvider(
AuthMechanism.MONGODB_SCRAM_SHA256,
credentials.mechanismProperties
);
if (!provider) {
// This auth mechanism is always present.
throw new MongoInvalidArgumentError(
`No AuthProvider for ${AuthMechanism.MONGODB_SCRAM_SHA256} defined.`
);
}
return await provider.prepare(handshakeDoc, authContext);
}
const provider = authContext.options.authProviders.getOrCreateProvider(
credentials.mechanism,
credentials.mechanismProperties
);
if (!provider) {
throw new MongoInvalidArgumentError(`No AuthProvider for ${credentials.mechanism} defined.`);
}
return await provider.prepare(handshakeDoc, authContext);
}
return handshakeDoc;
}
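// Illustrative sketch, not part of the driver source: the rough shape of the handshake document
// prepared above for an unauthenticated, non-load-balanced connection with no server API. The
// client metadata values are fabricated; with serverApi or loadBalanced set the first key becomes
// `hello`, and default SCRAM credentials with a username additionally add `saslSupportedMechs`.
const exampleHandshakeDocument: Document = {
  ismaster: 1, // the legacy hello command (LEGACY_HELLO_COMMAND)
  helloOk: true,
  client: { driver: { name: 'nodejs', version: '0.0.0-example' } },
  compression: [] // mirrors options.compressors; empty when none are configured
};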
/** @public */
export const LEGAL_TLS_SOCKET_OPTIONS = [
'allowPartialTrustChain',
'ALPNProtocols',
'ca',
'cert',
'checkServerIdentity',
'ciphers',
'crl',
'ecdhCurve',
'key',
'minDHSize',
'passphrase',
'pfx',
'rejectUnauthorized',
'secureContext',
'secureProtocol',
'servername',
'session'
] as const;
/** @public */
export const LEGAL_TCP_SOCKET_OPTIONS = [
'autoSelectFamily',
'autoSelectFamilyAttemptTimeout',
'family',
'hints',
'localAddress',
'localPort',
'lookup'
] as const;
function parseConnectOptions(options: ConnectionOptions): SocketConnectOpts {
const hostAddress = options.hostAddress;
if (!hostAddress) throw new MongoInvalidArgumentError('Option "hostAddress" is required');
const result: Partial<net.TcpNetConnectOpts & net.IpcNetConnectOpts> = {};
for (const name of LEGAL_TCP_SOCKET_OPTIONS) {
if (options[name] != null) {
(result as Document)[name] = options[name];
}
}
if (typeof hostAddress.socketPath === 'string') {
result.path = hostAddress.socketPath;
return result as net.IpcNetConnectOpts;
} else if (typeof hostAddress.host === 'string') {
result.host = hostAddress.host;
result.port = hostAddress.port;
return result as net.TcpNetConnectOpts;
} else {
// This should never happen since we set up HostAddresses
// But if we don't throw here the socket could hang until timeout
// TODO(NODE-3483)
throw new MongoRuntimeError(`Unexpected HostAddress ${JSON.stringify(hostAddress)}`);
}
}
type MakeConnectionOptions = ConnectionOptions & { existingSocket?: Stream };
function parseSslOptions(options: MakeConnectionOptions): TLSConnectionOpts {
const result: TLSConnectionOpts = parseConnectOptions(options);
// Merge in valid SSL options
for (const name of LEGAL_TLS_SOCKET_OPTIONS) {
if (options[name] != null) {
(result as Document)[name] = options[name];
}
}
if (options.existingSocket) {
result.socket = options.existingSocket;
}
// Default the SNI servername to the host when the host is not an IP address
if (result.servername == null && result.host && !net.isIP(result.host)) {
result.servername = result.host;
}
return result;
}
export async function makeSocket(options: MakeConnectionOptions): Promise<Stream> {
const useTLS = options.tls ?? false;
const noDelay = options.noDelay ?? true;
const connectTimeoutMS = options.connectTimeoutMS ?? 30000;
const existingSocket = options.existingSocket;
let socket: Stream;
if (options.proxyHost != null) {
// Currently, only Socks5 is supported.
return await makeSocks5Connection({
...options,
connectTimeoutMS // Should always be present for Socks5
});
}
if (useTLS) {
const tlsSocket = tls.connect(parseSslOptions(options));
if (typeof tlsSocket.disableRenegotiation === 'function') {
tlsSocket.disableRenegotiation();
}
socket = tlsSocket;
} else if (existingSocket) {
// In the TLS case, parseSslOptions() sets options.socket to existingSocket,
// so we only need to handle the non-TLS case here (where existingSocket
// gives us all we need out of the box).
socket = existingSocket;
} else {
socket = net.createConnection(parseConnectOptions(options));
}
socket.setKeepAlive(true, 300000);
socket.setTimeout(connectTimeoutMS);
socket.setNoDelay(noDelay);
let cancellationHandler: ((err: Error) => void) | null = null;
const { promise: connectedSocket, resolve, reject } = promiseWithResolvers<Stream>();
if (existingSocket) {
resolve(socket);
} else {
const start = performance.now();
const connectEvent = useTLS ? 'secureConnect' : 'connect';
socket
.once(connectEvent, () => resolve(socket))
.once('error', cause =>
reject(new MongoNetworkError(MongoError.buildErrorMessage(cause), { cause }))
)
.once('timeout', () => {
reject(
new MongoNetworkTimeoutError(
`Socket '${connectEvent}' timed out after ${(performance.now() - start) | 0}ms (connectTimeoutMS: ${connectTimeoutMS})`
)
);
})
.once('close', () =>
reject(
new MongoNetworkError(
`Socket closed after ${(performance.now() - start) | 0}ms during connection establishment`
)
)
);
if (options.cancellationToken != null) {
cancellationHandler = () =>
reject(
new MongoNetworkError(
`Socket connection establishment was cancelled after ${(performance.now() - start) | 0}ms`
)
);
options.cancellationToken.once('cancel', cancellationHandler);
}
}
try {
socket = await connectedSocket;
return socket;
} catch (error) {
socket.destroy();
throw error;
} finally {
socket.setTimeout(0);
if (cancellationHandler != null) {
options.cancellationToken?.removeListener('cancel', cancellationHandler);
}
}
}
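// Illustrative sketch, not part of the driver source: opening a plain TCP socket with
// makeSocket(). Host, port and timeout are example values, and the cast papers over the
// ConnectionOptions fields that makeSocket() does not consult for a non-TLS, non-proxy socket.
async function exampleMakeSocket(): Promise<void> {
  const socket = await makeSocket({
    hostAddress: HostAddress.fromHostPort('localhost', 27017),
    tls: false,
    connectTimeoutMS: 5_000
  } as MakeConnectionOptions);
  socket.end(); // the caller owns the socket once the promise resolves
}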
let socks: SocksLib | null = null;
function loadSocks() {
if (socks == null) {
const socksImport = getSocks();
if ('kModuleError' in socksImport) {
throw socksImport.kModuleError;
}
socks = socksImport;
}
return socks;
}
async function makeSocks5Connection(options: MakeConnectionOptions): Promise<Stream> {
const hostAddress = HostAddress.fromHostPort(
options.proxyHost ?? '', // proxyHost is guaranteed to be set here
options.proxyPort ?? 1080
);
// First, connect to the proxy server itself:
const rawSocket = await makeSocket({
...options,
hostAddress,
tls: false,
proxyHost: undefined
});
const destination = parseConnectOptions(options) as net.TcpNetConnectOpts;
if (typeof destination.host !== 'string' || typeof destination.port !== 'number') {
throw new MongoInvalidArgumentError('Can only make Socks5 connections to TCP hosts');
}
socks ??= loadSocks();
let existingSocket: Stream;
try {
// Then, establish the Socks5 proxy connection:
const connection = await socks.SocksClient.createConnection({
existing_socket: rawSocket,
timeout: options.connectTimeoutMS,
command: 'connect',
destination: {
host: destination.host,
port: destination.port
},
proxy: {
// host and port are ignored because we pass existing_socket
host: 'iLoveJavaScript',
port: 0,
type: 5,
userId: options.proxyUsername || undefined,
password: options.proxyPassword || undefined
}
});
existingSocket = connection.socket;
} catch (cause) {
throw new MongoNetworkError(MongoError.buildErrorMessage(cause), { cause });
}
// Finally, now treat the resulting duplex stream as the
// socket over which we send and receive wire protocol messages:
return await makeSocket({ ...options, existingSocket, proxyHost: undefined });
}

View file

@ -0,0 +1,932 @@
import { type Readable, Transform, type TransformCallback } from 'stream';
import { clearTimeout, setTimeout } from 'timers';
import {
type BSONSerializeOptions,
deserialize,
type DeserializeOptions,
type Document,
type ObjectId
} from '../bson';
import { type AutoEncrypter } from '../client-side-encryption/auto_encrypter';
import {
CLOSE,
CLUSTER_TIME_RECEIVED,
COMMAND_FAILED,
COMMAND_STARTED,
COMMAND_SUCCEEDED,
kDecorateResult,
PINNED,
UNPINNED
} from '../constants';
import {
MongoCompatibilityError,
MONGODB_ERROR_CODES,
MongoMissingDependencyError,
MongoNetworkError,
MongoNetworkTimeoutError,
MongoOperationTimeoutError,
MongoParseError,
MongoRuntimeError,
MongoServerError,
MongoUnexpectedServerResponseError
} from '../error';
import type { ServerApi, SupportedNodeConnectionOptions } from '../mongo_client';
import { type MongoClientAuthProviders } from '../mongo_client_auth_providers';
import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mongo_logger';
import { type Abortable, type CancellationToken, TypedEventEmitter } from '../mongo_types';
import { ReadPreference, type ReadPreferenceLike } from '../read_preference';
import { ServerType } from '../sdam/common';
import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions';
import { type TimeoutContext, TimeoutError } from '../timeout';
import {
BufferPool,
calculateDurationInMs,
type Callback,
decorateDecryptionResult,
HostAddress,
maxWireVersion,
type MongoDBNamespace,
noop,
now,
once,
squashError,
uuidV4
} from '../utils';
import type { WriteConcern } from '../write_concern';
import type { AuthContext } from './auth/auth_provider';
import type { MongoCredentials } from './auth/mongo_credentials';
import {
CommandFailedEvent,
CommandStartedEvent,
CommandSucceededEvent
} from './command_monitoring_events';
import {
OpCompressedRequest,
OpMsgRequest,
type OpMsgResponse,
OpQueryRequest,
type OpReply,
type WriteProtocolMessageType
} from './commands';
import type { Stream } from './connect';
import type { ClientMetadata } from './handshake/client_metadata';
import { StreamDescription, type StreamDescriptionOptions } from './stream_description';
import { type CompressorName, decompressResponse } from './wire_protocol/compression';
import { onData } from './wire_protocol/on_data';
import {
CursorResponse,
MongoDBResponse,
type MongoDBResponseConstructor
} from './wire_protocol/responses';
import { getReadPreference, isSharded } from './wire_protocol/shared';
/** @internal */
export interface CommandOptions extends BSONSerializeOptions {
secondaryOk?: boolean;
/** Specify read preference if command supports it */
readPreference?: ReadPreferenceLike;
monitoring?: boolean;
socketTimeoutMS?: number;
/** Session to use for the operation */
session?: ClientSession;
documentsReturnedIn?: string;
noResponse?: boolean;
omitReadPreference?: boolean;
omitMaxTimeMS?: boolean;
// TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint
// from executeOperation that the txnNum should be applied to this command.
// Applying a session to a command should happen as part of command construction,
// most likely in the CommandOperation#executeCommand method, where we have access to
// the details we need to determine if a txnNum should also be applied.
willRetryWrite?: boolean;
writeConcern?: WriteConcern;
directConnection?: boolean;
/** @internal */
timeoutContext?: TimeoutContext;
}
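// Illustrative sketch, not part of the driver source: a typical options bag passed to
// Connection.command() below. The values are examples, not driver defaults.
const exampleCommandOptions: CommandOptions = {
  readPreference: 'secondaryPreferred',
  socketTimeoutMS: 30_000, // overrides the connection's configured socket timeout for this command
  noResponse: false // when true, the driver does not wait for a reply
};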
/** @public */
export interface ProxyOptions {
proxyHost?: string;
proxyPort?: number;
proxyUsername?: string;
proxyPassword?: string;
}
/** @public */
export interface ConnectionOptions
extends SupportedNodeConnectionOptions,
StreamDescriptionOptions,
ProxyOptions {
// Internal creation info
id: number | '<monitor>';
generation: number;
hostAddress: HostAddress;
/** @internal */
autoEncrypter?: AutoEncrypter;
serverApi?: ServerApi;
monitorCommands: boolean;
/** @internal */
connectionType?: any;
credentials?: MongoCredentials;
/** @internal */
authProviders: MongoClientAuthProviders;
connectTimeoutMS?: number;
tls: boolean;
noDelay?: boolean;
socketTimeoutMS?: number;
cancellationToken?: CancellationToken;
metadata: ClientMetadata;
/** @internal */
extendedMetadata: Promise<Document>;
/** @internal */
mongoLogger?: MongoLogger | undefined;
}
/** @public */
export type ConnectionEvents = {
commandStarted(event: CommandStartedEvent): void;
commandSucceeded(event: CommandSucceededEvent): void;
commandFailed(event: CommandFailedEvent): void;
clusterTimeReceived(clusterTime: Document): void;
close(): void;
pinned(pinType: string): void;
unpinned(pinType: string): void;
};
/** @internal */
export function hasSessionSupport(conn: Connection): boolean {
const description = conn.description;
return description.logicalSessionTimeoutMinutes != null;
}
function streamIdentifier(stream: Stream, options: ConnectionOptions): string {
if (options.proxyHost) {
// If proxy options are specified, the properties of `stream` itself
// will not accurately reflect what endpoint this is connected to.
return options.hostAddress.toString();
}
const { remoteAddress, remotePort } = stream;
if (typeof remoteAddress === 'string' && typeof remotePort === 'number') {
return HostAddress.fromHostPort(remoteAddress, remotePort).toString();
}
return uuidV4().toString('hex');
}
/** @internal */
export class Connection extends TypedEventEmitter<ConnectionEvents> {
public id: number | '<monitor>';
public address: string;
public lastHelloMS = -1;
public serverApi?: ServerApi;
public helloOk = false;
public authContext?: AuthContext;
public delayedTimeoutId: NodeJS.Timeout | null = null;
public generation: number;
public accessToken?: string;
public readonly description: Readonly<StreamDescription>;
/**
* Represents if the connection has been established:
* - TCP handshake
* - TLS negotiated
* - mongodb handshake (saslStart, saslContinue), includes authentication
*
* Once connection is established, command logging can log events (if enabled)
*/
public established: boolean;
/** Indicates that the connection (including underlying TCP socket) has been closed. */
public closed = false;
private lastUseTime: number;
private clusterTime: Document | null = null;
private error: Error | null = null;
private dataEvents: AsyncGenerator<Buffer, void, void> | null = null;
private readonly socketTimeoutMS: number;
private readonly monitorCommands: boolean;
private readonly socket: Stream;
private readonly messageStream: Readable;
/** @event */
static readonly COMMAND_STARTED = COMMAND_STARTED;
/** @event */
static readonly COMMAND_SUCCEEDED = COMMAND_SUCCEEDED;
/** @event */
static readonly COMMAND_FAILED = COMMAND_FAILED;
/** @event */
static readonly CLUSTER_TIME_RECEIVED = CLUSTER_TIME_RECEIVED;
/** @event */
static readonly CLOSE = CLOSE;
/** @event */
static readonly PINNED = PINNED;
/** @event */
static readonly UNPINNED = UNPINNED;
constructor(stream: Stream, options: ConnectionOptions) {
super();
this.on('error', noop);
this.socket = stream;
this.id = options.id;
this.address = streamIdentifier(stream, options);
this.socketTimeoutMS = options.socketTimeoutMS ?? 0;
this.monitorCommands = options.monitorCommands;
this.serverApi = options.serverApi;
this.mongoLogger = options.mongoLogger;
this.established = false;
this.description = new StreamDescription(this.address, options);
this.generation = options.generation;
this.lastUseTime = now();
this.messageStream = this.socket
.on('error', this.onError.bind(this))
.pipe(new SizedMessageTransform({ connection: this }))
.on('error', this.onError.bind(this));
this.socket.on('close', this.onClose.bind(this));
this.socket.on('timeout', this.onTimeout.bind(this));
this.messageStream.pause();
}
public get hello() {
return this.description.hello;
}
// the `connect` method stores the result of the handshake hello on the connection
public set hello(response: Document | null) {
this.description.receiveResponse(response);
Object.freeze(this.description);
}
public get serviceId(): ObjectId | undefined {
return this.hello?.serviceId;
}
public get loadBalanced(): boolean {
return this.description.loadBalanced;
}
public get idleTime(): number {
return calculateDurationInMs(this.lastUseTime);
}
private get hasSessionSupport(): boolean {
return this.description.logicalSessionTimeoutMinutes != null;
}
private get supportsOpMsg(): boolean {
return (
this.description != null &&
maxWireVersion(this) >= 6 &&
!this.description.__nodejs_mock_server__
);
}
private get shouldEmitAndLogCommand(): boolean {
return (
(this.monitorCommands ||
(this.established &&
!this.authContext?.reauthenticating &&
this.mongoLogger?.willLog(MongoLoggableComponent.COMMAND, SeverityLevel.DEBUG))) ??
false
);
}
public markAvailable(): void {
this.lastUseTime = now();
}
public onError(error: Error) {
this.cleanup(error);
}
private onClose() {
const message = `connection ${this.id} to ${this.address} closed`;
this.cleanup(new MongoNetworkError(message));
}
private onTimeout() {
this.delayedTimeoutId = setTimeout(() => {
const message = `connection ${this.id} to ${this.address} timed out`;
const beforeHandshake = this.hello == null;
this.cleanup(new MongoNetworkTimeoutError(message, { beforeHandshake }));
}, 1).unref(); // No need for this timer to hold the event loop open
}
public destroy(): void {
if (this.closed) {
return;
}
// load balanced mode requires that these listeners remain on the connection
// after cleanup on timeouts, errors or close so we remove them before calling
// cleanup.
this.removeAllListeners(Connection.PINNED);
this.removeAllListeners(Connection.UNPINNED);
const message = `connection ${this.id} to ${this.address} closed`;
this.cleanup(new MongoNetworkError(message));
}
/**
* Cleans up the connection: destroys the socket and marks the connection closed.
*
* If an error is provided, any in-flight operations will be closed with the error.
*
* This method does nothing if the connection is already closed.
*/
private cleanup(error: Error): void {
if (this.closed) {
return;
}
this.socket.destroy();
this.error = error;
this.dataEvents?.throw(error).then(undefined, squashError);
this.closed = true;
this.emit(Connection.CLOSE);
}
private prepareCommand(db: string, command: Document, options: CommandOptions) {
let cmd = { ...command };
const readPreference = getReadPreference(options);
const session = options?.session;
let clusterTime = this.clusterTime;
if (this.serverApi) {
const { version, strict, deprecationErrors } = this.serverApi;
cmd.apiVersion = version;
if (strict != null) cmd.apiStrict = strict;
if (deprecationErrors != null) cmd.apiDeprecationErrors = deprecationErrors;
}
if (this.hasSessionSupport && session) {
if (
session.clusterTime &&
clusterTime &&
session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime)
) {
clusterTime = session.clusterTime;
}
const sessionError = applySession(session, cmd, options);
if (sessionError) throw sessionError;
} else if (session?.explicit) {
throw new MongoCompatibilityError('Current topology does not support sessions');
}
// if we have a known cluster time, gossip it
if (clusterTime) {
cmd.$clusterTime = clusterTime;
}
// For standalone, drivers MUST NOT set $readPreference.
if (this.description.type !== ServerType.Standalone) {
if (
!isSharded(this) &&
!this.description.loadBalanced &&
this.supportsOpMsg &&
options.directConnection === true &&
readPreference?.mode === 'primary'
) {
// For mongos and load balancers with 'primary' mode, drivers MUST NOT set $readPreference.
// For all other types with a direct connection, if the read preference is 'primary'
// (driver sets 'primary' as default if no read preference is configured),
// the $readPreference MUST be set to 'primaryPreferred'
// to ensure that any server type can handle the request.
cmd.$readPreference = ReadPreference.primaryPreferred.toJSON();
} else if (isSharded(this) && !this.supportsOpMsg && readPreference?.mode !== 'primary') {
// When sending a read operation via OP_QUERY and the $readPreference modifier,
// the query MUST be provided using the $query modifier.
cmd = {
$query: cmd,
$readPreference: readPreference.toJSON()
};
} else if (readPreference?.mode !== 'primary') {
// For mode 'primary', drivers MUST NOT set $readPreference.
// For all other read preference modes (i.e. 'secondary', 'primaryPreferred', ...),
// drivers MUST set $readPreference
cmd.$readPreference = readPreference.toJSON();
}
}
const commandOptions = {
numberToSkip: 0,
numberToReturn: -1,
checkKeys: false,
// This value is not overridable
secondaryOk: readPreference.secondaryOk(),
...options
};
options.timeoutContext?.addMaxTimeMSToCommand(cmd, options);
const message = this.supportsOpMsg
? new OpMsgRequest(db, cmd, commandOptions)
: new OpQueryRequest(db, cmd, commandOptions);
return message;
}
private async *sendWire(
message: WriteProtocolMessageType,
options: CommandOptions & Abortable,
responseType?: MongoDBResponseConstructor
): AsyncGenerator<MongoDBResponse> {
this.throwIfAborted();
const timeout =
options.socketTimeoutMS ??
options?.timeoutContext?.getSocketTimeoutMS() ??
this.socketTimeoutMS;
this.socket.setTimeout(timeout);
try {
await this.writeCommand(message, {
agreedCompressor: this.description.compressor ?? 'none',
zlibCompressionLevel: this.description.zlibCompressionLevel,
timeoutContext: options.timeoutContext,
signal: options.signal
});
if (options.noResponse || message.moreToCome) {
yield MongoDBResponse.empty;
return;
}
this.throwIfAborted();
if (
options.timeoutContext?.csotEnabled() &&
options.timeoutContext.minRoundTripTime != null &&
options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime
) {
throw new MongoOperationTimeoutError(
'Server roundtrip time is greater than the time remaining'
);
}
for await (const response of this.readMany(options)) {
this.socket.setTimeout(0);
const bson = response.parse();
const document = (responseType ?? MongoDBResponse).make(bson);
yield document;
this.throwIfAborted();
this.socket.setTimeout(timeout);
}
} finally {
this.socket.setTimeout(0);
}
}
private async *sendCommand(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions & Abortable,
responseType?: MongoDBResponseConstructor
) {
options?.signal?.throwIfAborted();
const message = this.prepareCommand(ns.db, command, options);
let started = 0;
if (this.shouldEmitAndLogCommand) {
started = now();
this.emitAndLogCommand(
this.monitorCommands,
Connection.COMMAND_STARTED,
message.databaseName,
this.established,
new CommandStartedEvent(this, message, this.description.serverConnectionId)
);
}
// If `documentsReturnedIn` not set or raw is not enabled, use input bson options
// Otherwise, support raw flag. Raw only works for cursors that hardcode firstBatch/nextBatch fields
const bsonOptions: DeserializeOptions =
options.documentsReturnedIn == null || !options.raw
? options
: {
...options,
raw: false,
fieldsAsRaw: { [options.documentsReturnedIn]: true }
};
/** MongoDBResponse instance or subclass */
let document: MongoDBResponse | undefined = undefined;
/** Cached result of a toObject call */
let object: Document | undefined = undefined;
try {
this.throwIfAborted();
for await (document of this.sendWire(message, options, responseType)) {
object = undefined;
if (options.session != null) {
updateSessionFromResponse(options.session, document);
}
if (document.$clusterTime) {
this.clusterTime = document.$clusterTime;
this.emit(Connection.CLUSTER_TIME_RECEIVED, document.$clusterTime);
}
if (document.ok === 0) {
if (options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) {
throw new MongoOperationTimeoutError('Server reported a timeout error', {
cause: new MongoServerError((object ??= document.toObject(bsonOptions)))
});
}
throw new MongoServerError((object ??= document.toObject(bsonOptions)));
}
if (this.shouldEmitAndLogCommand) {
this.emitAndLogCommand(
this.monitorCommands,
Connection.COMMAND_SUCCEEDED,
message.databaseName,
this.established,
new CommandSucceededEvent(
this,
message,
options.noResponse
? undefined
: message.moreToCome
? { ok: 1 }
: (object ??= document.toObject(bsonOptions)),
started,
this.description.serverConnectionId
)
);
}
if (responseType == null) {
yield (object ??= document.toObject(bsonOptions));
} else {
yield document;
}
this.throwIfAborted();
}
} catch (error) {
if (this.shouldEmitAndLogCommand) {
this.emitAndLogCommand(
this.monitorCommands,
Connection.COMMAND_FAILED,
message.databaseName,
this.established,
new CommandFailedEvent(this, message, error, started, this.description.serverConnectionId)
);
}
throw error;
}
}
public async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions | undefined,
responseType: T
): Promise<InstanceType<T>>;
public async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions | undefined,
responseType: T | undefined
): Promise<typeof responseType extends undefined ? Document : InstanceType<T>>;
public async command(
ns: MongoDBNamespace,
command: Document,
options?: CommandOptions
): Promise<Document>;
public async command(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions & Abortable = {},
responseType?: MongoDBResponseConstructor
): Promise<Document> {
this.throwIfAborted();
options.signal?.throwIfAborted();
for await (const document of this.sendCommand(ns, command, options, responseType)) {
if (options.timeoutContext?.csotEnabled()) {
if (MongoDBResponse.is(document)) {
if (document.isMaxTimeExpiredError) {
throw new MongoOperationTimeoutError('Server reported a timeout error', {
cause: new MongoServerError(document.toObject())
});
}
} else {
if (
(Array.isArray(document?.writeErrors) &&
document.writeErrors.some(
error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired
)) ||
document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired
) {
throw new MongoOperationTimeoutError('Server reported a timeout error', {
cause: new MongoServerError(document)
});
}
}
}
return document;
}
throw new MongoUnexpectedServerResponseError('Unable to get response from server');
}
public exhaustCommand(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions,
replyListener: Callback
) {
const exhaustLoop = async () => {
this.throwIfAborted();
for await (const reply of this.sendCommand(ns, command, options)) {
replyListener(undefined, reply);
this.throwIfAborted();
}
throw new MongoUnexpectedServerResponseError('Server ended moreToCome unexpectedly');
};
exhaustLoop().then(undefined, replyListener);
}
private throwIfAborted() {
if (this.error) throw this.error;
}
/**
* @internal
*
* Writes an OP_MSG or OP_QUERY request to the socket, optionally compressing the command. This method
* waits until the socket's buffer has emptied (the Nodejs socket `drain` event has fired).
*/
private async writeCommand(
command: WriteProtocolMessageType,
options: {
agreedCompressor?: CompressorName;
zlibCompressionLevel?: number;
timeoutContext?: TimeoutContext;
} & Abortable
): Promise<void> {
const finalCommand =
options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command)
? command
: new OpCompressedRequest(command, {
agreedCompressor: options.agreedCompressor ?? 'none',
zlibCompressionLevel: options.zlibCompressionLevel ?? 0
});
const buffer = Buffer.concat(await finalCommand.toBin());
if (options.timeoutContext?.csotEnabled()) {
if (
options.timeoutContext.minRoundTripTime != null &&
options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime
) {
throw new MongoOperationTimeoutError(
'Server roundtrip time is greater than the time remaining'
);
}
}
if (this.socket.write(buffer)) return;
const drainEvent = once<void>(this.socket, 'drain', options);
const timeout = options?.timeoutContext?.timeoutForSocketWrite;
const drained = timeout ? Promise.race([drainEvent, timeout]) : drainEvent;
try {
return await drained;
} catch (writeError) {
if (TimeoutError.is(writeError)) {
const timeoutError = new MongoOperationTimeoutError('Timed out at socket write');
this.onError(timeoutError);
throw timeoutError;
} else if (writeError === options.signal?.reason) {
this.onError(writeError);
}
throw writeError;
} finally {
timeout?.clear();
}
}
/**
* @internal
*
* Returns an async generator that yields full wire protocol messages from the underlying socket. This function
* yields messages until `moreToCome` is false or not present in a response, or the caller cancels the request
* by calling `return` on the generator.
*
* Note that `for-await` loops call `return` automatically when the loop is exited.
*/
private async *readMany(
options: {
timeoutContext?: TimeoutContext;
} & Abortable
): AsyncGenerator<OpMsgResponse | OpReply> {
try {
this.dataEvents = onData(this.messageStream, options);
this.messageStream.resume();
for await (const message of this.dataEvents) {
const response = await decompressResponse(message);
yield response;
if (!response.moreToCome) {
return;
}
}
} catch (readError) {
if (TimeoutError.is(readError)) {
const timeoutError = new MongoOperationTimeoutError(
`Timed out during socket read (${readError.duration}ms)`
);
this.dataEvents = null;
this.onError(timeoutError);
throw timeoutError;
} else if (readError === options.signal?.reason) {
this.onError(readError);
}
throw readError;
} finally {
this.dataEvents = null;
this.messageStream.pause();
this.throwIfAborted();
}
}
}
/** @internal */
export class SizedMessageTransform extends Transform {
bufferPool: BufferPool;
connection: Connection;
constructor({ connection }: { connection: Connection }) {
super({ writableObjectMode: false, readableObjectMode: true });
this.bufferPool = new BufferPool();
this.connection = connection;
}
override _transform(chunk: Buffer, encoding: unknown, callback: TransformCallback): void {
if (this.connection.delayedTimeoutId != null) {
clearTimeout(this.connection.delayedTimeoutId);
this.connection.delayedTimeoutId = null;
}
this.bufferPool.append(chunk);
while (this.bufferPool.length) {
// While there are any bytes in the buffer
// Try to fetch a size from the top 4 bytes
const sizeOfMessage = this.bufferPool.getInt32();
if (sizeOfMessage == null) {
// Not even an int32 worth of data. Stop the loop, we need more chunks.
break;
}
if (sizeOfMessage < 0) {
// The size in the message has a negative value, this is probably corruption, throw:
return callback(new MongoParseError(`Message size cannot be negative: ${sizeOfMessage}`));
}
if (sizeOfMessage > this.bufferPool.length) {
// We do not have enough bytes to make a sizeOfMessage chunk
break;
}
// Add a message to the stream
const message = this.bufferPool.read(sizeOfMessage);
if (!this.push(message)) {
// We only subscribe to data events, so we should never get backpressure;
// if we do, we do not have handling for it.
return callback(
new MongoRuntimeError(`SizedMessageTransform does not support backpressure`)
);
}
}
callback();
}
}
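// Illustrative sketch, not part of the driver source: SizedMessageTransform reassembles a byte
// stream into length-prefixed wire messages. The connection argument is stubbed because
// _transform only touches delayedTimeoutId; this is for demonstration, not production use.
function exampleSizedMessageSplit(): void {
  const transform = new SizedMessageTransform({
    connection: { delayedTimeoutId: null } as unknown as Connection
  });
  transform.on('data', (message: Buffer) => {
    console.log('reassembled wire message of', message.readInt32LE(0), 'bytes');
  });
  // A 9-byte message: an int32 length prefix (9) followed by 5 payload bytes,
  // deliberately written in two chunks to show reassembly across writes.
  const frame = Buffer.alloc(9);
  frame.writeInt32LE(9, 0);
  transform.write(frame.subarray(0, 4));
  transform.write(frame.subarray(4));
}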
/** @internal */
export class CryptoConnection extends Connection {
/** @internal */
autoEncrypter?: AutoEncrypter;
constructor(stream: Stream, options: ConnectionOptions) {
super(stream, options);
this.autoEncrypter = options.autoEncrypter;
}
public override async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions | undefined,
responseType: T
): Promise<InstanceType<T>>;
public override async command(
ns: MongoDBNamespace,
command: Document,
options?: CommandOptions
): Promise<Document>;
override async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
cmd: Document,
options?: CommandOptions,
responseType?: T | undefined
): Promise<Document> {
const { autoEncrypter } = this;
if (!autoEncrypter) {
// TODO(NODE-6065): throw a MongoRuntimeError in Node V7
// @ts-expect-error No cause provided because there is no underlying error.
throw new MongoMissingDependencyError('No AutoEncrypter available for encryption', {
dependencyName: 'n/a'
});
}
const serverWireVersion = maxWireVersion(this);
if (serverWireVersion === 0) {
// This means the initial handshake hasn't happened yet
return await super.command<T>(ns, cmd, options, responseType);
}
if (serverWireVersion < 8) {
throw new MongoCompatibilityError(
'Auto-encryption requires a minimum MongoDB version of 4.2'
);
}
// Save sort or indexKeys based on the command being run
// the encrypt API serializes our JS objects to BSON to pass to the native code layer
// and then deserializes the encrypted result, the protocol level components
// of the command (ex. sort) are then converted to JS objects potentially losing
// important key order information. These fields are never encrypted so we can save the values
// from before the encryption and replace them after encryption has been performed
const sort: Map<string, number> | null = cmd.find || cmd.findAndModify ? cmd.sort : null;
const indexKeys: Map<string, number>[] | null = cmd.createIndexes
? cmd.indexes.map((index: { key: Map<string, number> }) => index.key)
: null;
const encrypted = await autoEncrypter.encrypt(ns.toString(), cmd, options);
// Replace the saved values
if (sort != null && (cmd.find || cmd.findAndModify)) {
encrypted.sort = sort;
}
if (indexKeys != null && cmd.createIndexes) {
for (const [offset, index] of indexKeys.entries()) {
// @ts-expect-error `encrypted` is a generic "command", but we've narrowed for only `createIndexes` commands here
encrypted.indexes[offset].key = index;
}
}
const encryptedResponse = await super.command(
ns,
encrypted,
options,
// Eventually we want to require `responseType` which means we would satisfy `T` as the return type.
// In the meantime, we want encryptedResponse to always be _at least_ a MongoDBResponse if not a more specific subclass
// So that we can ensure we have access to the on-demand APIs for decorate response
responseType ?? MongoDBResponse
);
const result = await autoEncrypter.decrypt(encryptedResponse.toBytes(), options);
const decryptedResponse = responseType?.make(result) ?? deserialize(result, options);
if (autoEncrypter[kDecorateResult]) {
if (responseType == null) {
decorateDecryptionResult(decryptedResponse, encryptedResponse.toObject(), true);
} else if (decryptedResponse instanceof CursorResponse) {
decryptedResponse.encryptedResponse = encryptedResponse;
}
}
return decryptedResponse;
}
}

View file

@ -0,0 +1,829 @@
import { clearTimeout, setTimeout } from 'timers';
import type { ObjectId } from '../bson';
import {
APM_EVENTS,
CONNECTION_CHECK_OUT_FAILED,
CONNECTION_CHECK_OUT_STARTED,
CONNECTION_CHECKED_IN,
CONNECTION_CHECKED_OUT,
CONNECTION_CLOSED,
CONNECTION_CREATED,
CONNECTION_POOL_CLEARED,
CONNECTION_POOL_CLOSED,
CONNECTION_POOL_CREATED,
CONNECTION_POOL_READY,
CONNECTION_READY
} from '../constants';
import {
type AnyError,
type MongoError,
MongoInvalidArgumentError,
MongoMissingCredentialsError,
MongoNetworkError,
MongoOperationTimeoutError,
MongoRuntimeError,
MongoServerError
} from '../error';
import { type Abortable, CancellationToken, TypedEventEmitter } from '../mongo_types';
import type { Server } from '../sdam/server';
import { type TimeoutContext, TimeoutError } from '../timeout';
import {
addAbortListener,
type Callback,
kDispose,
List,
makeCounter,
noop,
now,
promiseWithResolvers
} from '../utils';
import { connect } from './connect';
import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection';
import {
ConnectionCheckedInEvent,
ConnectionCheckedOutEvent,
ConnectionCheckOutFailedEvent,
ConnectionCheckOutStartedEvent,
ConnectionClosedEvent,
ConnectionCreatedEvent,
ConnectionPoolClearedEvent,
ConnectionPoolClosedEvent,
ConnectionPoolCreatedEvent,
ConnectionPoolReadyEvent,
ConnectionReadyEvent
} from './connection_pool_events';
import {
PoolClearedError,
PoolClearedOnNetworkError,
PoolClosedError,
WaitQueueTimeoutError
} from './errors';
import { ConnectionPoolMetrics } from './metrics';
/** @public */
export interface ConnectionPoolOptions extends Omit<ConnectionOptions, 'id' | 'generation'> {
/** The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections. */
maxPoolSize: number;
/** The minimum number of connections that MUST exist at any moment in a single connection pool. */
minPoolSize: number;
/** The maximum number of connections that may be in the process of being established concurrently by the connection pool. */
maxConnecting: number;
/** The maximum amount of time a connection may remain idle in the connection pool before being closed as idle. */
maxIdleTimeMS: number;
/** The maximum amount of time operation execution should wait for a connection to become available. The default is 0 which means there is no limit. */
waitQueueTimeoutMS: number;
/** If we are in load balancer mode. */
loadBalanced: boolean;
/** @internal */
minPoolSizeCheckFrequencyMS?: number;
}
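// Illustrative sketch, not part of the driver source: the pool sizing knobs above as they might
// be configured. The values are examples; when unset, the constructor below falls back to
// maxPoolSize 100, minPoolSize 0, maxConnecting 2 and a waitQueueTimeoutMS of 0 (no limit).
const examplePoolSizing: Partial<ConnectionPoolOptions> = {
  maxPoolSize: 50, // cap on available + pending + checked-out connections
  minPoolSize: 5, // kept warm in the background once the pool is ready
  maxConnecting: 2, // concurrent connection establishments
  maxIdleTimeMS: 60_000, // idle connections older than this are destroyed as perished
  waitQueueTimeoutMS: 10_000 // how long checkOut() may wait for a free connection
};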
/** @internal */
export interface WaitQueueMember {
resolve: (conn: Connection) => void;
reject: (err: AnyError) => void;
cancelled: boolean;
checkoutTime: number;
}
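// Illustrative sketch, not part of the driver source: the wait-queue pattern used by checkOut()
// further below. Each waiter keeps the resolve/reject handles of its promise on the queue entry
// so that processWaitQueue() can later hand it a connection or an error.
function exampleWaitQueueEntry(): { member: WaitQueueMember; promise: Promise<Connection> } {
  const { promise, resolve, reject } = promiseWithResolvers<Connection>();
  const member: WaitQueueMember = { resolve, reject, cancelled: false, checkoutTime: now() };
  return { member, promise };
}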
/** @internal */
export const PoolState = Object.freeze({
paused: 'paused',
ready: 'ready',
closed: 'closed'
} as const);
type PoolState = (typeof PoolState)[keyof typeof PoolState];
/**
* @public
* @deprecated This interface is deprecated and will be removed in a future release as it is not used
* in the driver
*/
export interface CloseOptions {
force?: boolean;
}
/** @public */
export type ConnectionPoolEvents = {
connectionPoolCreated(event: ConnectionPoolCreatedEvent): void;
connectionPoolReady(event: ConnectionPoolReadyEvent): void;
connectionPoolClosed(event: ConnectionPoolClosedEvent): void;
connectionPoolCleared(event: ConnectionPoolClearedEvent): void;
connectionCreated(event: ConnectionCreatedEvent): void;
connectionReady(event: ConnectionReadyEvent): void;
connectionClosed(event: ConnectionClosedEvent): void;
connectionCheckOutStarted(event: ConnectionCheckOutStartedEvent): void;
connectionCheckOutFailed(event: ConnectionCheckOutFailedEvent): void;
connectionCheckedOut(event: ConnectionCheckedOutEvent): void;
connectionCheckedIn(event: ConnectionCheckedInEvent): void;
} & Omit<ConnectionEvents, 'close' | 'message'>;
/**
* A pool of connections which dynamically resizes, and emit events related to pool activity
* @internal
*/
export class ConnectionPool extends TypedEventEmitter<ConnectionPoolEvents> {
public options: Readonly<ConnectionPoolOptions>;
/** An integer representing the SDAM generation of the pool */
public generation: number;
/** A map of generations to service ids */
public serviceGenerations: Map<string, number>;
private poolState: PoolState;
private server: Server;
private connections: List<Connection>;
private pending: number;
private checkedOut: Set<Connection>;
private minPoolSizeTimer?: NodeJS.Timeout;
private connectionCounter: Generator<number>;
private cancellationToken: CancellationToken;
private waitQueue: List<WaitQueueMember>;
private metrics: ConnectionPoolMetrics;
private processingWaitQueue: boolean;
/**
* Emitted when the connection pool is created.
* @event
*/
static readonly CONNECTION_POOL_CREATED = CONNECTION_POOL_CREATED;
/**
* Emitted once when the connection pool is closed
* @event
*/
static readonly CONNECTION_POOL_CLOSED = CONNECTION_POOL_CLOSED;
/**
* Emitted each time the connection pool is cleared and its generation incremented
* @event
*/
static readonly CONNECTION_POOL_CLEARED = CONNECTION_POOL_CLEARED;
/**
* Emitted each time the connection pool is marked ready
* @event
*/
static readonly CONNECTION_POOL_READY = CONNECTION_POOL_READY;
/**
* Emitted when a connection is created.
* @event
*/
static readonly CONNECTION_CREATED = CONNECTION_CREATED;
/**
* Emitted when a connection becomes established, and is ready to use
* @event
*/
static readonly CONNECTION_READY = CONNECTION_READY;
/**
* Emitted when a connection is closed
* @event
*/
static readonly CONNECTION_CLOSED = CONNECTION_CLOSED;
/**
* Emitted when an attempt to check out a connection begins
* @event
*/
static readonly CONNECTION_CHECK_OUT_STARTED = CONNECTION_CHECK_OUT_STARTED;
/**
* Emitted when an attempt to check out a connection fails
* @event
*/
static readonly CONNECTION_CHECK_OUT_FAILED = CONNECTION_CHECK_OUT_FAILED;
/**
* Emitted each time a connection is successfully checked out of the connection pool
* @event
*/
static readonly CONNECTION_CHECKED_OUT = CONNECTION_CHECKED_OUT;
/**
* Emitted each time a connection is successfully checked into the connection pool
* @event
*/
static readonly CONNECTION_CHECKED_IN = CONNECTION_CHECKED_IN;
constructor(server: Server, options: ConnectionPoolOptions) {
super();
this.on('error', noop);
this.options = Object.freeze({
connectionType: Connection,
...options,
maxPoolSize: options.maxPoolSize ?? 100,
minPoolSize: options.minPoolSize ?? 0,
maxConnecting: options.maxConnecting ?? 2,
maxIdleTimeMS: options.maxIdleTimeMS ?? 0,
waitQueueTimeoutMS: options.waitQueueTimeoutMS ?? 0,
minPoolSizeCheckFrequencyMS: options.minPoolSizeCheckFrequencyMS ?? 100,
autoEncrypter: options.autoEncrypter
});
if (this.options.minPoolSize > this.options.maxPoolSize) {
throw new MongoInvalidArgumentError(
'Connection pool minimum size must not be greater than maximum pool size'
);
}
this.poolState = PoolState.paused;
this.server = server;
this.connections = new List();
this.pending = 0;
this.checkedOut = new Set();
this.minPoolSizeTimer = undefined;
this.generation = 0;
this.serviceGenerations = new Map();
this.connectionCounter = makeCounter(1);
this.cancellationToken = new CancellationToken();
this.cancellationToken.setMaxListeners(Infinity);
this.waitQueue = new List();
this.metrics = new ConnectionPoolMetrics();
this.processingWaitQueue = false;
this.mongoLogger = this.server.topology.client?.mongoLogger;
this.component = 'connection';
process.nextTick(() => {
this.emitAndLog(ConnectionPool.CONNECTION_POOL_CREATED, new ConnectionPoolCreatedEvent(this));
});
}
/** The address of the endpoint the pool is connected to */
get address(): string {
return this.options.hostAddress.toString();
}
/**
* Check if the pool has been closed
*
* TODO(NODE-3263): We can remove this property once shell no longer needs it
*/
get closed(): boolean {
return this.poolState === PoolState.closed;
}
/** An integer expressing how many total connections (available + pending + in use) the pool currently has */
get totalConnectionCount(): number {
return (
this.availableConnectionCount + this.pendingConnectionCount + this.currentCheckedOutCount
);
}
/** An integer expressing how many connections are currently available in the pool. */
get availableConnectionCount(): number {
return this.connections.length;
}
get pendingConnectionCount(): number {
return this.pending;
}
get currentCheckedOutCount(): number {
return this.checkedOut.size;
}
get waitQueueSize(): number {
return this.waitQueue.length;
}
get loadBalanced(): boolean {
return this.options.loadBalanced;
}
get serverError() {
return this.server.description.error;
}
/**
* This is exposed ONLY for use in mongosh, to enable
* killing all connections if a user quits the shell with
* operations in progress.
*
* This property may be removed as a part of NODE-3263.
*/
get checkedOutConnections() {
return this.checkedOut;
}
/**
* Get the metrics information for the pool when a wait queue timeout occurs.
*/
private waitQueueErrorMetrics(): string {
return this.metrics.info(this.options.maxPoolSize);
}
/**
* Set the pool state to "ready"
*/
ready(): void {
if (this.poolState !== PoolState.paused) {
return;
}
this.poolState = PoolState.ready;
this.emitAndLog(ConnectionPool.CONNECTION_POOL_READY, new ConnectionPoolReadyEvent(this));
clearTimeout(this.minPoolSizeTimer);
this.ensureMinPoolSize();
}
/**
* Check a connection out of this pool. The connection will continue to be tracked, but no reference to it
* will be held by the pool. This means that if a connection is checked out it MUST be checked back in or
* explicitly destroyed by the new owner.
*/
async checkOut(options: { timeoutContext: TimeoutContext } & Abortable): Promise<Connection> {
const checkoutTime = now();
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_STARTED,
new ConnectionCheckOutStartedEvent(this)
);
const { promise, resolve, reject } = promiseWithResolvers<Connection>();
const timeout = options.timeoutContext.connectionCheckoutTimeout;
const waitQueueMember: WaitQueueMember = {
resolve,
reject,
cancelled: false,
checkoutTime
};
const abortListener = addAbortListener(options.signal, function () {
waitQueueMember.cancelled = true;
reject(this.reason);
});
this.waitQueue.push(waitQueueMember);
process.nextTick(() => this.processWaitQueue());
try {
timeout?.throwIfExpired();
return await (timeout ? Promise.race([promise, timeout]) : promise);
} catch (error) {
if (TimeoutError.is(error)) {
timeout?.clear();
waitQueueMember.cancelled = true;
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime)
);
const timeoutError = new WaitQueueTimeoutError(
this.loadBalanced
? this.waitQueueErrorMetrics()
: 'Timed out while checking out a connection from connection pool',
this.address
);
if (options.timeoutContext.csotEnabled()) {
throw new MongoOperationTimeoutError('Timed out during connection checkout', {
cause: timeoutError
});
}
throw timeoutError;
}
throw error;
} finally {
abortListener?.[kDispose]();
timeout?.clear();
}
}
/**
* Check a connection into the pool.
*
* @param connection - The connection to check in
*/
checkIn(connection: Connection): void {
if (!this.checkedOut.has(connection)) {
return;
}
const poolClosed = this.closed;
const stale = this.connectionIsStale(connection);
const willDestroy = !!(poolClosed || stale || connection.closed);
if (!willDestroy) {
connection.markAvailable();
this.connections.unshift(connection);
}
this.checkedOut.delete(connection);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECKED_IN,
new ConnectionCheckedInEvent(this, connection)
);
if (willDestroy) {
const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale';
this.destroyConnection(connection, reason);
}
process.nextTick(() => this.processWaitQueue());
}
/**
* Clear the pool
*
* Pool reset is handled by incrementing the pool's generation count. Any existing connection of a
* previous generation will eventually be pruned during subsequent checkouts.
*/
clear(options: { serviceId?: ObjectId; interruptInUseConnections?: boolean } = {}): void {
if (this.closed) {
return;
}
// handle load balanced case
if (this.loadBalanced) {
const { serviceId } = options;
if (!serviceId) {
throw new MongoRuntimeError(
'ConnectionPool.clear() called in load balanced mode with no serviceId.'
);
}
const sid = serviceId.toHexString();
const generation = this.serviceGenerations.get(sid);
// The generation should always exist for this service id, but
// TypeScript requires the null check.
if (generation == null) {
throw new MongoRuntimeError('Service generations are required in load balancer mode.');
} else {
// Increment the generation for the service id.
this.serviceGenerations.set(sid, generation + 1);
}
this.emitAndLog(
ConnectionPool.CONNECTION_POOL_CLEARED,
new ConnectionPoolClearedEvent(this, { serviceId })
);
return;
}
// handle non load-balanced case
const interruptInUseConnections = options.interruptInUseConnections ?? false;
const oldGeneration = this.generation;
this.generation += 1;
const alreadyPaused = this.poolState === PoolState.paused;
this.poolState = PoolState.paused;
this.clearMinPoolSizeTimer();
if (!alreadyPaused) {
this.emitAndLog(
ConnectionPool.CONNECTION_POOL_CLEARED,
new ConnectionPoolClearedEvent(this, {
interruptInUseConnections
})
);
}
if (interruptInUseConnections) {
process.nextTick(() => this.interruptInUseConnections(oldGeneration));
}
this.processWaitQueue();
}
/**
* Closes all stale in-use connections in the pool with a resumable PoolClearedOnNetworkError.
*
* Only connections where `connection.generation <= minGeneration` are killed.
*/
private interruptInUseConnections(minGeneration: number) {
for (const connection of this.checkedOut) {
if (connection.generation <= minGeneration) {
connection.onError(new PoolClearedOnNetworkError(this));
this.checkIn(connection);
}
}
}
/** Close the pool */
close(): void {
if (this.closed) {
return;
}
// immediately cancel any in-flight connections
this.cancellationToken.emit('cancel');
// end the connection counter
if (typeof this.connectionCounter.return === 'function') {
this.connectionCounter.return(undefined);
}
this.poolState = PoolState.closed;
this.clearMinPoolSizeTimer();
this.processWaitQueue();
for (const conn of this.connections) {
this.emitAndLog(
ConnectionPool.CONNECTION_CLOSED,
new ConnectionClosedEvent(this, conn, 'poolClosed')
);
conn.destroy();
}
this.connections.clear();
this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLOSED, new ConnectionPoolClosedEvent(this));
}
/**
* @internal
* Reauthenticate a connection
*/
async reauthenticate(connection: Connection): Promise<void> {
const authContext = connection.authContext;
if (!authContext) {
throw new MongoRuntimeError('No auth context found on connection.');
}
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError(
'Connection is missing credentials when asked to reauthenticate'
);
}
const resolvedCredentials = credentials.resolveAuthMechanism(connection.hello);
const provider = this.server.topology.client.s.authProviders.getOrCreateProvider(
resolvedCredentials.mechanism,
resolvedCredentials.mechanismProperties
);
if (!provider) {
throw new MongoMissingCredentialsError(
`Reauthenticate failed due to no auth provider for ${credentials.mechanism}`
);
}
await provider.reauth(authContext);
return;
}
/** Clear the min pool size timer */
private clearMinPoolSizeTimer(): void {
const minPoolSizeTimer = this.minPoolSizeTimer;
if (minPoolSizeTimer) {
clearTimeout(minPoolSizeTimer);
}
}
private destroyConnection(
connection: Connection,
reason: 'error' | 'idle' | 'stale' | 'poolClosed'
) {
this.emitAndLog(
ConnectionPool.CONNECTION_CLOSED,
new ConnectionClosedEvent(this, connection, reason)
);
// destroy the connection
connection.destroy();
}
private connectionIsStale(connection: Connection) {
const serviceId = connection.serviceId;
if (this.loadBalanced && serviceId) {
const sid = serviceId.toHexString();
const generation = this.serviceGenerations.get(sid);
return connection.generation !== generation;
}
return connection.generation !== this.generation;
}
private connectionIsIdle(connection: Connection) {
return !!(this.options.maxIdleTimeMS && connection.idleTime > this.options.maxIdleTimeMS);
}
/**
* Destroys a connection if the connection is perished.
*
* @returns `true` if the connection was destroyed, `false` otherwise.
*/
private destroyConnectionIfPerished(connection: Connection): boolean {
const isStale = this.connectionIsStale(connection);
const isIdle = this.connectionIsIdle(connection);
if (!isStale && !isIdle && !connection.closed) {
return false;
}
const reason = connection.closed ? 'error' : isStale ? 'stale' : 'idle';
this.destroyConnection(connection, reason);
return true;
}
private createConnection(callback: Callback<Connection>) {
const connectOptions: ConnectionOptions = {
...this.options,
id: this.connectionCounter.next().value,
generation: this.generation,
cancellationToken: this.cancellationToken,
mongoLogger: this.mongoLogger,
authProviders: this.server.topology.client.s.authProviders
};
this.pending++;
    // Per the CMAP spec, the created event is emitted before any I/O happens;
    // this is our version of a "virtual" no-I/O connection.
const connectionCreatedTime = now();
this.emitAndLog(
ConnectionPool.CONNECTION_CREATED,
new ConnectionCreatedEvent(this, { id: connectOptions.id })
);
connect(connectOptions).then(
connection => {
// The pool might have closed since we started trying to create a connection
if (this.poolState !== PoolState.ready) {
this.pending--;
connection.destroy();
callback(this.closed ? new PoolClosedError(this) : new PoolClearedError(this));
return;
}
// forward all events from the connection to the pool
for (const event of [...APM_EVENTS, Connection.CLUSTER_TIME_RECEIVED]) {
connection.on(event, (e: any) => this.emit(event, e));
}
if (this.loadBalanced) {
connection.on(Connection.PINNED, pinType => this.metrics.markPinned(pinType));
connection.on(Connection.UNPINNED, pinType => this.metrics.markUnpinned(pinType));
const serviceId = connection.serviceId;
if (serviceId) {
let generation;
const sid = serviceId.toHexString();
if ((generation = this.serviceGenerations.get(sid))) {
connection.generation = generation;
} else {
this.serviceGenerations.set(sid, 0);
connection.generation = 0;
}
}
}
connection.markAvailable();
this.emitAndLog(
ConnectionPool.CONNECTION_READY,
new ConnectionReadyEvent(this, connection, connectionCreatedTime)
);
this.pending--;
callback(undefined, connection);
},
error => {
this.pending--;
this.server.handleError(error);
this.emitAndLog(
ConnectionPool.CONNECTION_CLOSED,
new ConnectionClosedEvent(
this,
{ id: connectOptions.id, serviceId: undefined },
'error',
// TODO(NODE-5192): Remove this cast
error as MongoError
)
);
if (error instanceof MongoNetworkError || error instanceof MongoServerError) {
error.connectionGeneration = connectOptions.generation;
}
callback(error ?? new MongoRuntimeError('Connection creation failed without error'));
}
);
}
private ensureMinPoolSize() {
const minPoolSize = this.options.minPoolSize;
if (this.poolState !== PoolState.ready || minPoolSize === 0) {
return;
}
this.connections.prune(connection => this.destroyConnectionIfPerished(connection));
if (
this.totalConnectionCount < minPoolSize &&
this.pendingConnectionCount < this.options.maxConnecting
) {
// NOTE: ensureMinPoolSize should not try to get all the pending
// connection permits because that potentially delays the availability of
// the connection to a checkout request
this.createConnection((err, connection) => {
if (!err && connection) {
this.connections.push(connection);
process.nextTick(() => this.processWaitQueue());
}
if (this.poolState === PoolState.ready) {
clearTimeout(this.minPoolSizeTimer);
this.minPoolSizeTimer = setTimeout(
() => this.ensureMinPoolSize(),
this.options.minPoolSizeCheckFrequencyMS
);
}
});
} else {
clearTimeout(this.minPoolSizeTimer);
this.minPoolSizeTimer = setTimeout(
() => this.ensureMinPoolSize(),
this.options.minPoolSizeCheckFrequencyMS
);
}
}
private processWaitQueue() {
if (this.processingWaitQueue) {
return;
}
this.processingWaitQueue = true;
while (this.waitQueueSize) {
const waitQueueMember = this.waitQueue.first();
if (!waitQueueMember) {
this.waitQueue.shift();
continue;
}
if (waitQueueMember.cancelled) {
this.waitQueue.shift();
continue;
}
if (this.poolState !== PoolState.ready) {
const reason = this.closed ? 'poolClosed' : 'connectionError';
const error = this.closed ? new PoolClosedError(this) : new PoolClearedError(this);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error)
);
this.waitQueue.shift();
waitQueueMember.reject(error);
continue;
}
if (!this.availableConnectionCount) {
break;
}
const connection = this.connections.shift();
if (!connection) {
break;
}
if (!this.destroyConnectionIfPerished(connection)) {
this.checkedOut.add(connection);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECKED_OUT,
new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime)
);
this.waitQueue.shift();
waitQueueMember.resolve(connection);
}
}
const { maxPoolSize, maxConnecting } = this.options;
while (
this.waitQueueSize > 0 &&
this.pendingConnectionCount < maxConnecting &&
(maxPoolSize === 0 || this.totalConnectionCount < maxPoolSize)
) {
const waitQueueMember = this.waitQueue.shift();
if (!waitQueueMember || waitQueueMember.cancelled) {
continue;
}
this.createConnection((err, connection) => {
if (waitQueueMember.cancelled) {
if (!err && connection) {
this.connections.push(connection);
}
} else {
if (err) {
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
// TODO(NODE-5192): Remove this cast
new ConnectionCheckOutFailedEvent(
this,
'connectionError',
waitQueueMember.checkoutTime,
err as MongoError
)
);
waitQueueMember.reject(err);
} else if (connection) {
this.checkedOut.add(connection);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECKED_OUT,
new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime)
);
waitQueueMember.resolve(connection);
}
}
process.nextTick(() => this.processWaitQueue());
});
}
this.processingWaitQueue = false;
}
}
/**
* A callback provided to `withConnection`
* @internal
*
* @param error - An error instance representing the error during the execution.
* @param connection - The managed connection which was checked out of the pool.
* @param callback - A function to call back after connection management is complete
*/
export type WithConnectionCallback = (
error: MongoError | undefined,
connection: Connection | undefined,
callback: Callback<Connection>
) => void;
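/*
 * A minimal sketch of how this pool's behavior is tuned from the public API,
 * assuming a locally running mongod at the default port; the URI and option
 * values below are illustrative. `waitQueueTimeoutMS` bounds how long
 * checkOut() waits before failing with a WaitQueueTimeoutError, and
 * `maxConnecting` caps concurrent connection establishment.
 *
 * ```ts
 * import { MongoClient } from 'mongodb';
 *
 * const client = new MongoClient('mongodb://localhost:27017', {
 *   maxPoolSize: 20, // upper bound on connections per server
 *   minPoolSize: 2, // kept warm by ensureMinPoolSize()
 *   maxConnecting: 2, // concurrent establishment limit
 *   waitQueueTimeoutMS: 5000 // checkout wait limit
 * });
 *
 * await client.connect();
 * await client.db('test').command({ ping: 1 });
 * await client.close();
 * ```
 */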

View file

@@ -0,0 +1,300 @@
import type { ObjectId } from '../bson';
import {
CONNECTION_CHECK_OUT_FAILED,
CONNECTION_CHECK_OUT_STARTED,
CONNECTION_CHECKED_IN,
CONNECTION_CHECKED_OUT,
CONNECTION_CLOSED,
CONNECTION_CREATED,
CONNECTION_POOL_CLEARED,
CONNECTION_POOL_CLOSED,
CONNECTION_POOL_CREATED,
CONNECTION_POOL_READY,
CONNECTION_READY
} from '../constants';
import type { MongoError } from '../error';
import { now } from '../utils';
import type { Connection } from './connection';
import type { ConnectionPool, ConnectionPoolOptions } from './connection_pool';
/**
* The base export class for all monitoring events published from the connection pool
* @public
* @category Event
*/
export abstract class ConnectionPoolMonitoringEvent {
/** A timestamp when the event was created */
time: Date;
/** The address (host/port pair) of the pool */
address: string;
/** @internal */
abstract name:
| typeof CONNECTION_CHECK_OUT_FAILED
| typeof CONNECTION_CHECK_OUT_STARTED
| typeof CONNECTION_CHECKED_IN
| typeof CONNECTION_CHECKED_OUT
| typeof CONNECTION_CLOSED
| typeof CONNECTION_CREATED
| typeof CONNECTION_POOL_CLEARED
| typeof CONNECTION_POOL_CLOSED
| typeof CONNECTION_POOL_CREATED
| typeof CONNECTION_POOL_READY
| typeof CONNECTION_READY;
/** @internal */
constructor(pool: ConnectionPool) {
this.time = new Date();
this.address = pool.address;
}
}
/**
* An event published when a connection pool is created
* @public
* @category Event
*/
export class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent {
/** The options used to create this connection pool */
options: Pick<
ConnectionPoolOptions,
'maxPoolSize' | 'minPoolSize' | 'maxConnecting' | 'maxIdleTimeMS' | 'waitQueueTimeoutMS'
>;
/** @internal */
name = CONNECTION_POOL_CREATED;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
const { maxConnecting, maxPoolSize, minPoolSize, maxIdleTimeMS, waitQueueTimeoutMS } =
pool.options;
this.options = { maxConnecting, maxPoolSize, minPoolSize, maxIdleTimeMS, waitQueueTimeoutMS };
}
}
/**
* An event published when a connection pool is ready
* @public
* @category Event
*/
export class ConnectionPoolReadyEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
name = CONNECTION_POOL_READY;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
}
}
/**
* An event published when a connection pool is closed
* @public
* @category Event
*/
export class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
name = CONNECTION_POOL_CLOSED;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
}
}
/**
* An event published when a connection pool creates a new connection
* @public
* @category Event
*/
export class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent {
/** A monotonically increasing, per-pool id for the newly created connection */
connectionId: number | '<monitor>';
/** @internal */
name = CONNECTION_CREATED;
/** @internal */
constructor(pool: ConnectionPool, connection: { id: number | '<monitor>' }) {
super(pool);
this.connectionId = connection.id;
}
}
/**
* An event published when a connection is ready for use
* @public
* @category Event
*/
export class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
  /**
   * The time it took to establish the connection.
   * In accordance with the definition of connection establishment implied by
   * `ConnectionPoolOptions.maxConnecting`, this is the time elapsed between
   * emitting a `ConnectionCreatedEvent` and emitting this event as part of
   * the same checkout.
   *
   * Naturally, when establishing a connection is part of a checkout, this
   * duration is no greater than `ConnectionCheckedOutEvent.durationMS`.
   */
durationMS: number;
/** @internal */
name = CONNECTION_READY;
/** @internal */
constructor(pool: ConnectionPool, connection: Connection, connectionCreatedEventTime: number) {
super(pool);
this.durationMS = now() - connectionCreatedEventTime;
this.connectionId = connection.id;
}
}
/**
* An event published when a connection is closed
* @public
* @category Event
*/
export class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** The reason the connection was closed */
reason: string;
serviceId?: ObjectId;
/** @internal */
name = CONNECTION_CLOSED;
/** @internal */
error: MongoError | null;
/** @internal */
constructor(
pool: ConnectionPool,
connection: Pick<Connection, 'id' | 'serviceId'>,
reason: 'idle' | 'stale' | 'poolClosed' | 'error',
error?: MongoError
) {
super(pool);
this.connectionId = connection.id;
this.reason = reason;
this.serviceId = connection.serviceId;
this.error = error ?? null;
}
}
/**
* An event published when a request to check a connection out begins
* @public
* @category Event
*/
export class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
name = CONNECTION_CHECK_OUT_STARTED;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
}
}
/**
* An event published when a request to check a connection out fails
* @public
* @category Event
*/
export class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent {
/** The reason the attempt to check out failed */
reason: string;
/** @internal */
error?: MongoError;
/** @internal */
name = CONNECTION_CHECK_OUT_FAILED;
  /**
   * The time spent attempting to check out the connection.
   * More specifically, the time elapsed between
   * emitting a `ConnectionCheckOutStartedEvent`
   * and emitting this event as part of the same failed checkout.
   */
durationMS: number;
/** @internal */
constructor(
pool: ConnectionPool,
reason: 'poolClosed' | 'timeout' | 'connectionError',
checkoutTime: number,
error?: MongoError
) {
super(pool);
this.durationMS = now() - checkoutTime;
this.reason = reason;
this.error = error;
}
}
/**
* An event published when a connection is checked out of the connection pool
* @public
* @category Event
*/
export class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** @internal */
name = CONNECTION_CHECKED_OUT;
  /**
   * The time it took to check out the connection.
   * More specifically, the time elapsed between
   * emitting a `ConnectionCheckOutStartedEvent`
   * and emitting this event as part of the same checkout.
   */
durationMS: number;
/** @internal */
constructor(pool: ConnectionPool, connection: Connection, checkoutTime: number) {
super(pool);
this.durationMS = now() - checkoutTime;
this.connectionId = connection.id;
}
}
/**
* An event published when a connection is checked into the connection pool
* @public
* @category Event
*/
export class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** @internal */
name = CONNECTION_CHECKED_IN;
/** @internal */
constructor(pool: ConnectionPool, connection: Connection) {
super(pool);
this.connectionId = connection.id;
}
}
/**
* An event published when a connection pool is cleared
* @public
* @category Event
*/
export class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
serviceId?: ObjectId;
interruptInUseConnections?: boolean;
/** @internal */
name = CONNECTION_POOL_CLEARED;
/** @internal */
constructor(
pool: ConnectionPool,
options: { serviceId?: ObjectId; interruptInUseConnections?: boolean } = {}
) {
super(pool);
this.serviceId = options.serviceId;
this.interruptInUseConnections = options.interruptInUseConnections;
}
}
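/*
 * A minimal sketch of observing these events from application code. The
 * MongoClient re-emits CMAP events, so the event names below correspond to
 * the constants used by the classes above; the URI is illustrative.
 *
 * ```ts
 * import { MongoClient } from 'mongodb';
 *
 * const client = new MongoClient('mongodb://localhost:27017');
 *
 * client.on('connectionCheckedOut', event => {
 *   // durationMS spans ConnectionCheckOutStartedEvent -> this event
 *   console.log(`checked out connection ${event.connectionId} in ${event.durationMS}ms`);
 * });
 * client.on('connectionPoolCleared', event => {
 *   console.log(`pool cleared for ${event.address}`);
 * });
 *
 * await client.connect();
 * await client.db('admin').command({ ping: 1 });
 * await client.close();
 * ```
 */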

View file

@@ -0,0 +1,119 @@
import { MongoDriverError, MongoErrorLabel, MongoNetworkError } from '../error';
import type { ConnectionPool } from './connection_pool';
/**
* An error indicating a connection pool is closed
* @category Error
*/
export class PoolClosedError extends MongoDriverError {
/** The address of the connection pool */
address: string;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(pool: ConnectionPool) {
super('Attempted to check out a connection from closed connection pool');
this.address = pool.address;
}
override get name(): string {
return 'MongoPoolClosedError';
}
}
/**
* An error indicating a connection pool is currently paused
* @category Error
*/
export class PoolClearedError extends MongoNetworkError {
/** The address of the connection pool */
address: string;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(pool: ConnectionPool, message?: string) {
const errorMessage = message
? message
: `Connection pool for ${pool.address} was cleared because another operation failed with: "${pool.serverError?.message}"`;
super(errorMessage, pool.serverError ? { cause: pool.serverError } : undefined);
this.address = pool.address;
this.addErrorLabel(MongoErrorLabel.PoolRequstedRetry);
}
override get name(): string {
return 'MongoPoolClearedError';
}
}
/**
* An error indicating that a connection pool has been cleared after the monitor for that server timed out.
* @category Error
*/
export class PoolClearedOnNetworkError extends PoolClearedError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(pool: ConnectionPool) {
super(pool, `Connection to ${pool.address} interrupted due to server monitor timeout`);
}
override get name(): string {
return 'PoolClearedOnNetworkError';
}
}
/**
* An error thrown when a request to check out a connection times out
* @category Error
*/
export class WaitQueueTimeoutError extends MongoDriverError {
/** The address of the connection pool */
address: string;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string, address: string) {
super(message);
this.address = address;
}
override get name(): string {
return 'MongoWaitQueueTimeoutError';
}
}
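/*
 * A minimal sketch of reacting to these errors from application code. The
 * error names come from the getters above, and `hasErrorLabel` is the standard
 * MongoError helper; the URI and timeout value are illustrative.
 *
 * ```ts
 * import { MongoClient, MongoError } from 'mongodb';
 *
 * const client = new MongoClient('mongodb://localhost:27017', { waitQueueTimeoutMS: 1000 });
 *
 * try {
 *   await client.db('test').collection('docs').findOne({});
 * } catch (error) {
 *   if (error instanceof MongoError && error.name === 'MongoWaitQueueTimeoutError') {
 *     // every connection stayed busy for longer than waitQueueTimeoutMS
 *   } else if (error instanceof MongoError && error.hasErrorLabel('PoolRequstedRetry')) {
 *     // the pool was cleared while this operation waited; a retry is reasonable
 *   }
 *   throw error;
 * } finally {
 *   await client.close();
 * }
 * ```
 */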

View file

@@ -0,0 +1,320 @@
import * as os from 'os';
import * as process from 'process';
import { BSON, type Document, Int32 } from '../../bson';
import { MongoInvalidArgumentError } from '../../error';
import type { MongoOptions } from '../../mongo_client';
import { fileIsAccessible } from '../../utils';
// eslint-disable-next-line @typescript-eslint/no-require-imports
const NODE_DRIVER_VERSION = require('../../../package.json').version;
/**
* @public
* @see https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#hello-command
*/
export interface ClientMetadata {
driver: {
name: string;
version: string;
};
os: {
type: string;
name?: NodeJS.Platform;
architecture?: string;
version?: string;
};
platform: string;
application?: {
name: string;
};
/** FaaS environment information */
env?: {
name: 'aws.lambda' | 'gcp.func' | 'azure.func' | 'vercel';
timeout_sec?: Int32;
memory_mb?: Int32;
region?: string;
url?: string;
};
}
/** @public */
export interface ClientMetadataOptions {
driverInfo?: {
name?: string;
version?: string;
platform?: string;
};
appName?: string;
}
/** @internal */
export class LimitedSizeDocument {
private document = new Map();
/** BSON overhead: Int32 + Null byte */
private documentSize = 5;
constructor(private maxSize: number) {}
  /** Only adds the key/value pair if the resulting BSON byte length stays under `maxSize` */
public ifItFitsItSits(key: string, value: Record<string, any> | string): boolean {
    // The BSON byteLength of the new element equals the byteLength of a document
    // containing only that element, minus the document-length int32 and the null terminator.
const newElementSize = BSON.serialize(new Map().set(key, value)).byteLength - 5;
if (newElementSize + this.documentSize > this.maxSize) {
return false;
}
this.documentSize += newElementSize;
this.document.set(key, value);
return true;
}
toObject(): Document {
return BSON.deserialize(BSON.serialize(this.document), {
promoteLongs: false,
promoteBuffers: false,
promoteValues: false,
useBigInt64: false
});
}
}
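/*
 * A worked sketch of the size accounting above, using the bson package
 * directly. A document holding a single element carries a 4-byte length
 * prefix and a trailing null byte, so ifItFitsItSits subtracts those 5 bytes
 * and only the element itself counts against the budget. The values here are
 * illustrative, not measured constants.
 *
 * ```ts
 * import { BSON } from 'bson';
 *
 * const element = new Map().set('driver', { name: 'nodejs', version: '6.x' });
 * const elementSize = BSON.serialize(element).byteLength - 5; // element bytes only
 * const fitsIn512 = 5 + elementSize <= 512; // 5 = empty-document overhead
 * console.log({ elementSize, fitsIn512 });
 * ```
 */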
type MakeClientMetadataOptions = Pick<MongoOptions, 'appName' | 'driverInfo'>;
/**
* From the specs:
* Implementors SHOULD cumulatively update fields in the following order until the document is under the size limit:
* 1. Omit fields from `env` except `env.name`.
* 2. Omit fields from `os` except `os.type`.
* 3. Omit the `env` document entirely.
 * 4. Truncate `platform`. (Unlike the spec, we do not truncate this field.)
*/
export function makeClientMetadata(options: MakeClientMetadataOptions): ClientMetadata {
const metadataDocument = new LimitedSizeDocument(512);
const { appName = '' } = options;
// Add app name first, it must be sent
if (appName.length > 0) {
const name =
Buffer.byteLength(appName, 'utf8') <= 128
? options.appName
: Buffer.from(appName, 'utf8').subarray(0, 128).toString('utf8');
metadataDocument.ifItFitsItSits('application', { name });
}
const { name = '', version = '', platform = '' } = options.driverInfo;
const driverInfo = {
name: name.length > 0 ? `nodejs|${name}` : 'nodejs',
version: version.length > 0 ? `${NODE_DRIVER_VERSION}|${version}` : NODE_DRIVER_VERSION
};
if (!metadataDocument.ifItFitsItSits('driver', driverInfo)) {
throw new MongoInvalidArgumentError(
'Unable to include driverInfo name and version, metadata cannot exceed 512 bytes'
);
}
let runtimeInfo = getRuntimeInfo();
if (platform.length > 0) {
runtimeInfo = `${runtimeInfo}|${platform}`;
}
if (!metadataDocument.ifItFitsItSits('platform', runtimeInfo)) {
throw new MongoInvalidArgumentError(
'Unable to include driverInfo platform, metadata cannot exceed 512 bytes'
);
}
// Note: order matters, os.type is last so it will be removed last if we're at maxSize
const osInfo = new Map()
.set('name', process.platform)
.set('architecture', process.arch)
.set('version', os.release())
.set('type', os.type());
if (!metadataDocument.ifItFitsItSits('os', osInfo)) {
for (const key of osInfo.keys()) {
osInfo.delete(key);
if (osInfo.size === 0) break;
if (metadataDocument.ifItFitsItSits('os', osInfo)) break;
}
}
const faasEnv = getFAASEnv();
if (faasEnv != null) {
if (!metadataDocument.ifItFitsItSits('env', faasEnv)) {
for (const key of faasEnv.keys()) {
faasEnv.delete(key);
if (faasEnv.size === 0) break;
if (metadataDocument.ifItFitsItSits('env', faasEnv)) break;
}
}
}
return metadataDocument.toObject() as ClientMetadata;
}
let dockerPromise: Promise<boolean>;
/** @internal */
async function getContainerMetadata() {
const containerMetadata: Record<string, any> = {};
dockerPromise ??= fileIsAccessible('/.dockerenv');
const isDocker = await dockerPromise;
const { KUBERNETES_SERVICE_HOST = '' } = process.env;
const isKubernetes = KUBERNETES_SERVICE_HOST.length > 0 ? true : false;
if (isDocker) containerMetadata.runtime = 'docker';
if (isKubernetes) containerMetadata.orchestrator = 'kubernetes';
return containerMetadata;
}
/**
* @internal
* Re-add each metadata value.
* Attempt to add new env container metadata, but keep old data if it does not fit.
*/
export async function addContainerMetadata(originalMetadata: ClientMetadata) {
const containerMetadata = await getContainerMetadata();
if (Object.keys(containerMetadata).length === 0) return originalMetadata;
const extendedMetadata = new LimitedSizeDocument(512);
const extendedEnvMetadata = { ...originalMetadata?.env, container: containerMetadata };
for (const [key, val] of Object.entries(originalMetadata)) {
if (key !== 'env') {
extendedMetadata.ifItFitsItSits(key, val);
} else {
if (!extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata)) {
// add in old data if newer / extended metadata does not fit
extendedMetadata.ifItFitsItSits('env', val);
}
}
}
if (!('env' in originalMetadata)) {
extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata);
}
return extendedMetadata.toObject();
}
/**
* Collects FaaS metadata.
* - `name` MUST be the last key in the Map returned.
*/
export function getFAASEnv(): Map<string, string | Int32> | null {
const {
AWS_EXECUTION_ENV = '',
AWS_LAMBDA_RUNTIME_API = '',
FUNCTIONS_WORKER_RUNTIME = '',
K_SERVICE = '',
FUNCTION_NAME = '',
VERCEL = '',
AWS_LAMBDA_FUNCTION_MEMORY_SIZE = '',
AWS_REGION = '',
FUNCTION_MEMORY_MB = '',
FUNCTION_REGION = '',
FUNCTION_TIMEOUT_SEC = '',
VERCEL_REGION = ''
} = process.env;
const isAWSFaaS =
AWS_EXECUTION_ENV.startsWith('AWS_Lambda_') || AWS_LAMBDA_RUNTIME_API.length > 0;
const isAzureFaaS = FUNCTIONS_WORKER_RUNTIME.length > 0;
const isGCPFaaS = K_SERVICE.length > 0 || FUNCTION_NAME.length > 0;
const isVercelFaaS = VERCEL.length > 0;
// Note: order matters, name must always be the last key
const faasEnv = new Map();
// When isVercelFaaS is true so is isAWSFaaS; Vercel inherits the AWS env
if (isVercelFaaS && !(isAzureFaaS || isGCPFaaS)) {
if (VERCEL_REGION.length > 0) {
faasEnv.set('region', VERCEL_REGION);
}
faasEnv.set('name', 'vercel');
return faasEnv;
}
if (isAWSFaaS && !(isAzureFaaS || isGCPFaaS || isVercelFaaS)) {
if (AWS_REGION.length > 0) {
faasEnv.set('region', AWS_REGION);
}
if (
AWS_LAMBDA_FUNCTION_MEMORY_SIZE.length > 0 &&
Number.isInteger(+AWS_LAMBDA_FUNCTION_MEMORY_SIZE)
) {
faasEnv.set('memory_mb', new Int32(AWS_LAMBDA_FUNCTION_MEMORY_SIZE));
}
faasEnv.set('name', 'aws.lambda');
return faasEnv;
}
if (isAzureFaaS && !(isGCPFaaS || isAWSFaaS || isVercelFaaS)) {
faasEnv.set('name', 'azure.func');
return faasEnv;
}
if (isGCPFaaS && !(isAzureFaaS || isAWSFaaS || isVercelFaaS)) {
if (FUNCTION_REGION.length > 0) {
faasEnv.set('region', FUNCTION_REGION);
}
if (FUNCTION_MEMORY_MB.length > 0 && Number.isInteger(+FUNCTION_MEMORY_MB)) {
faasEnv.set('memory_mb', new Int32(FUNCTION_MEMORY_MB));
}
if (FUNCTION_TIMEOUT_SEC.length > 0 && Number.isInteger(+FUNCTION_TIMEOUT_SEC)) {
faasEnv.set('timeout_sec', new Int32(FUNCTION_TIMEOUT_SEC));
}
faasEnv.set('name', 'gcp.func');
return faasEnv;
}
return null;
}
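/*
 * A minimal sketch of what getFAASEnv reports on Vercel, assuming only the
 * Vercel environment variables are set; the import path is illustrative.
 * Per the ordering note above, `name` is always the last key in the Map.
 *
 * ```ts
 * import { getFAASEnv } from './client_metadata'; // illustrative path
 *
 * process.env.VERCEL = '1';
 * process.env.VERCEL_REGION = 'iad1';
 *
 * console.log(getFAASEnv());
 * // Map(2) { 'region' => 'iad1', 'name' => 'vercel' }
 * ```
 */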
/**
* @internal
* This type represents the global Deno object and the minimal type contract we expect it to satisfy.
*/
declare const Deno: { version?: { deno?: string } } | undefined;
/**
* @internal
* This type represents the global Bun object and the minimal type contract we expect it to satisfy.
*/
declare const Bun: { (): void; version?: string } | undefined;
/**
* @internal
* Get current JavaScript runtime platform
*
* NOTE: The version information fetching is intentionally written defensively
* to avoid having a released driver version that becomes incompatible
* with a future change to these global objects.
*/
function getRuntimeInfo(): string {
if ('Deno' in globalThis) {
const version = typeof Deno?.version?.deno === 'string' ? Deno?.version?.deno : '0.0.0-unknown';
return `Deno v${version}, ${os.endianness()}`;
}
if ('Bun' in globalThis) {
const version = typeof Bun?.version === 'string' ? Bun?.version : '0.0.0-unknown';
return `Bun v${version}, ${os.endianness()}`;
}
return `Node.js ${process.version}, ${os.endianness()}`;
}
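/*
 * A minimal sketch of how this metadata is populated from the public API,
 * assuming a standard MongoClient; the URI and names are illustrative.
 * `appName` becomes `application.name` in the handshake document, and
 * `driverInfo` is appended to the `driver` and `platform` fields built by
 * makeClientMetadata above.
 *
 * ```ts
 * import { MongoClient } from 'mongodb';
 *
 * const client = new MongoClient('mongodb://localhost:27017', {
 *   appName: 'inventory-service',
 *   driverInfo: { name: 'my-framework', version: '1.2.3', platform: 'my-platform' }
 * });
 * // Resulting handshake fields (roughly):
 * //   application: { name: 'inventory-service' }
 * //   driver: { name: 'nodejs|my-framework', version: '<driver version>|1.2.3' }
 * //   platform: 'Node.js <version>, <endianness>|my-platform'
 * await client.connect();
 * await client.close();
 * ```
 */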

View file

@@ -0,0 +1,58 @@
/** @internal */
export class ConnectionPoolMetrics {
static readonly TXN = 'txn' as const;
static readonly CURSOR = 'cursor' as const;
static readonly OTHER = 'other' as const;
txnConnections = 0;
cursorConnections = 0;
otherConnections = 0;
/**
* Mark a connection as pinned for a specific operation.
*/
markPinned(pinType: string): void {
if (pinType === ConnectionPoolMetrics.TXN) {
this.txnConnections += 1;
} else if (pinType === ConnectionPoolMetrics.CURSOR) {
this.cursorConnections += 1;
} else {
this.otherConnections += 1;
}
}
/**
* Unmark a connection as pinned for an operation.
*/
markUnpinned(pinType: string): void {
if (pinType === ConnectionPoolMetrics.TXN) {
this.txnConnections -= 1;
} else if (pinType === ConnectionPoolMetrics.CURSOR) {
this.cursorConnections -= 1;
} else {
this.otherConnections -= 1;
}
}
/**
* Return information about the cmap metrics as a string.
*/
info(maxPoolSize: number): string {
return (
'Timed out while checking out a connection from connection pool: ' +
`maxPoolSize: ${maxPoolSize}, ` +
`connections in use by cursors: ${this.cursorConnections}, ` +
`connections in use by transactions: ${this.txnConnections}, ` +
`connections in use by other operations: ${this.otherConnections}`
);
}
/**
* Reset the metrics to the initial values.
*/
reset(): void {
this.txnConnections = 0;
this.cursorConnections = 0;
this.otherConnections = 0;
}
}
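/*
 * A minimal sketch of how these counters are used for load balanced
 * connection pinning; the import path is illustrative since this class is
 * internal to the driver.
 *
 * ```ts
 * import { ConnectionPoolMetrics } from './metrics'; // illustrative path
 *
 * const metrics = new ConnectionPoolMetrics();
 * metrics.markPinned(ConnectionPoolMetrics.TXN); // a transaction pinned a connection
 * metrics.markPinned(ConnectionPoolMetrics.CURSOR); // a cursor pinned a connection
 * metrics.markUnpinned(ConnectionPoolMetrics.TXN); // the transaction finished
 *
 * // feeds the WaitQueueTimeoutError message in load balanced mode
 * console.log(metrics.info(100));
 * ```
 */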

View file

@@ -0,0 +1,96 @@
import { type Document, type Double, Long } from '../bson';
import { ServerType } from '../sdam/common';
import { parseServerType } from '../sdam/server_description';
import type { CompressorName } from './wire_protocol/compression';
const RESPONSE_FIELDS = [
'minWireVersion',
'maxWireVersion',
'maxBsonObjectSize',
'maxMessageSizeBytes',
'maxWriteBatchSize',
'logicalSessionTimeoutMinutes'
] as const;
/** @public */
export interface StreamDescriptionOptions {
compressors?: CompressorName[];
logicalSessionTimeoutMinutes?: number;
loadBalanced: boolean;
}
/** @public */
export class StreamDescription {
address: string;
type: ServerType;
minWireVersion?: number;
maxWireVersion?: number;
maxBsonObjectSize: number;
maxMessageSizeBytes: number;
maxWriteBatchSize: number;
compressors: CompressorName[];
compressor?: CompressorName;
logicalSessionTimeoutMinutes?: number;
loadBalanced: boolean;
__nodejs_mock_server__?: boolean;
zlibCompressionLevel?: number;
serverConnectionId: bigint | null;
public hello: Document | null = null;
constructor(address: string, options?: StreamDescriptionOptions) {
this.address = address;
this.type = ServerType.Unknown;
this.minWireVersion = undefined;
this.maxWireVersion = undefined;
this.maxBsonObjectSize = 16777216;
this.maxMessageSizeBytes = 48000000;
this.maxWriteBatchSize = 100000;
this.logicalSessionTimeoutMinutes = options?.logicalSessionTimeoutMinutes;
this.loadBalanced = !!options?.loadBalanced;
this.compressors =
options && options.compressors && Array.isArray(options.compressors)
? options.compressors
: [];
this.serverConnectionId = null;
}
receiveResponse(response: Document | null): void {
if (response == null) {
return;
}
this.hello = response;
this.type = parseServerType(response);
if ('connectionId' in response) {
this.serverConnectionId = this.parseServerConnectionID(response.connectionId);
} else {
this.serverConnectionId = null;
}
for (const field of RESPONSE_FIELDS) {
if (response[field] != null) {
this[field] = response[field];
}
// testing case
if ('__nodejs_mock_server__' in response) {
this.__nodejs_mock_server__ = response['__nodejs_mock_server__'];
}
}
if (response.compression) {
this.compressor = this.compressors.filter(c => response.compression?.includes(c))[0];
}
}
/* @internal */
parseServerConnectionID(serverConnectionId: number | Double | bigint | Long): bigint {
// Connection ids are always integral, so it's safe to coerce doubles as well as
// any integral type.
return Long.isLong(serverConnectionId)
? serverConnectionId.toBigInt()
: // @ts-expect-error: Doubles are coercible to number
BigInt(serverConnectionId);
}
}
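/*
 * A minimal sketch of a StreamDescription absorbing a hello response; the
 * import path and the response document are illustrative.
 *
 * ```ts
 * import { StreamDescription } from './stream_description'; // illustrative path
 *
 * const description = new StreamDescription('localhost:27017', {
 *   loadBalanced: false,
 *   compressors: ['zlib']
 * });
 * description.receiveResponse({
 *   ok: 1,
 *   isWritablePrimary: true,
 *   maxWireVersion: 25,
 *   connectionId: 42,
 *   compression: ['zlib']
 * });
 *
 * console.log(description.type); // server type parsed from the hello response
 * console.log(description.maxWireVersion); // 25
 * console.log(description.serverConnectionId); // 42n
 * console.log(description.compressor); // 'zlib'
 * ```
 */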

View file

@@ -0,0 +1,196 @@
import { promisify } from 'util';
import * as zlib from 'zlib';
import { LEGACY_HELLO_COMMAND } from '../../constants';
import { getSnappy, getZstdLibrary, type SnappyLib, type ZStandard } from '../../deps';
import { MongoDecompressionError, MongoInvalidArgumentError } from '../../error';
import {
type MessageHeader,
OpCompressedRequest,
OpMsgResponse,
OpReply,
type WriteProtocolMessageType
} from '../commands';
import { OP_COMPRESSED, OP_MSG } from './constants';
/** @public */
export const Compressor = Object.freeze({
none: 0,
snappy: 1,
zlib: 2,
zstd: 3
} as const);
/** @public */
export type Compressor = (typeof Compressor)[CompressorName];
/** @public */
export type CompressorName = keyof typeof Compressor;
export const uncompressibleCommands = new Set([
LEGACY_HELLO_COMMAND,
'saslStart',
'saslContinue',
'getnonce',
'authenticate',
'createUser',
'updateUser',
'copydbSaslStart',
'copydbgetnonce',
'copydb'
]);
const ZSTD_COMPRESSION_LEVEL = 3;
const zlibInflate = promisify(zlib.inflate.bind(zlib));
const zlibDeflate = promisify(zlib.deflate.bind(zlib));
let zstd: ZStandard;
let Snappy: SnappyLib | null = null;
function loadSnappy() {
if (Snappy == null) {
const snappyImport = getSnappy();
if ('kModuleError' in snappyImport) {
throw snappyImport.kModuleError;
}
Snappy = snappyImport;
}
return Snappy;
}
// Facilitate compressing a message using an agreed compressor
export async function compress(
options: { zlibCompressionLevel: number; agreedCompressor: CompressorName },
dataToBeCompressed: Buffer
): Promise<Buffer> {
const zlibOptions = {} as zlib.ZlibOptions;
switch (options.agreedCompressor) {
case 'snappy': {
Snappy ??= loadSnappy();
return await Snappy.compress(dataToBeCompressed);
}
case 'zstd': {
loadZstd();
if ('kModuleError' in zstd) {
throw zstd['kModuleError'];
}
return await zstd.compress(dataToBeCompressed, ZSTD_COMPRESSION_LEVEL);
}
case 'zlib': {
if (options.zlibCompressionLevel) {
zlibOptions.level = options.zlibCompressionLevel;
}
return await zlibDeflate(dataToBeCompressed, zlibOptions);
}
default: {
throw new MongoInvalidArgumentError(
`Unknown compressor ${options.agreedCompressor} failed to compress`
);
}
}
}
// Decompress a message using the given compressor
export async function decompress(compressorID: number, compressedData: Buffer): Promise<Buffer> {
if (
compressorID !== Compressor.snappy &&
compressorID !== Compressor.zstd &&
compressorID !== Compressor.zlib &&
compressorID !== Compressor.none
) {
throw new MongoDecompressionError(
`Server sent message compressed using an unsupported compressor. (Received compressor ID ${compressorID})`
);
}
switch (compressorID) {
case Compressor.snappy: {
Snappy ??= loadSnappy();
return await Snappy.uncompress(compressedData, { asBuffer: true });
}
case Compressor.zstd: {
loadZstd();
if ('kModuleError' in zstd) {
throw zstd['kModuleError'];
}
return await zstd.decompress(compressedData);
}
case Compressor.zlib: {
return await zlibInflate(compressedData);
}
default: {
return compressedData;
}
}
}
/**
* Load ZStandard if it is not already set.
*/
function loadZstd() {
if (!zstd) {
zstd = getZstdLibrary();
}
}
const MESSAGE_HEADER_SIZE = 16;
/**
* @internal
*
* Compresses an OP_MSG or OP_QUERY message, if compression is configured. This method
* also serializes the command to BSON.
*/
export async function compressCommand(
command: WriteProtocolMessageType,
description: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number }
): Promise<Buffer> {
const finalCommand =
description.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command)
? command
: new OpCompressedRequest(command, {
agreedCompressor: description.agreedCompressor ?? 'none',
zlibCompressionLevel: description.zlibCompressionLevel ?? 0
});
const data = await finalCommand.toBin();
return Buffer.concat(data);
}
/**
* @internal
*
* Decompresses an OP_MSG or OP_QUERY response from the server, if compression is configured.
*
* This method does not parse the response's BSON.
*/
export async function decompressResponse(message: Buffer): Promise<OpMsgResponse | OpReply> {
const messageHeader: MessageHeader = {
length: message.readInt32LE(0),
requestId: message.readInt32LE(4),
responseTo: message.readInt32LE(8),
opCode: message.readInt32LE(12)
};
if (messageHeader.opCode !== OP_COMPRESSED) {
const ResponseType = messageHeader.opCode === OP_MSG ? OpMsgResponse : OpReply;
const messageBody = message.subarray(MESSAGE_HEADER_SIZE);
return new ResponseType(message, messageHeader, messageBody);
}
const header: MessageHeader = {
...messageHeader,
fromCompressed: true,
opCode: message.readInt32LE(MESSAGE_HEADER_SIZE),
length: message.readInt32LE(MESSAGE_HEADER_SIZE + 4)
};
const compressorID = message[MESSAGE_HEADER_SIZE + 8];
const compressedBuffer = message.slice(MESSAGE_HEADER_SIZE + 9);
// recalculate based on wrapped opcode
const ResponseType = header.opCode === OP_MSG ? OpMsgResponse : OpReply;
const messageBody = await decompress(compressorID, compressedBuffer);
if (messageBody.length !== header.length) {
throw new MongoDecompressionError('Message body and message header must be the same length');
}
return new ResponseType(message, header, messageBody);
}
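/*
 * A minimal round-trip sketch using the zlib compressor, which requires no
 * optional native dependency; the import path is illustrative.
 *
 * ```ts
 * import { compress, decompress, Compressor } from './compression'; // illustrative path
 *
 * const payload = Buffer.from('hello compression', 'utf8');
 *
 * const compressed = await compress(
 *   { agreedCompressor: 'zlib', zlibCompressionLevel: 6 },
 *   payload
 * );
 * const roundTripped = await decompress(Compressor.zlib, compressed);
 *
 * console.log(roundTripped.equals(payload)); // true
 * ```
 */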

View file

@@ -0,0 +1,13 @@
export const MIN_SUPPORTED_SERVER_VERSION = '4.0';
export const MAX_SUPPORTED_SERVER_VERSION = '8.0';
export const MIN_SUPPORTED_WIRE_VERSION = 7;
export const MAX_SUPPORTED_WIRE_VERSION = 25;
export const MIN_SUPPORTED_QE_WIRE_VERSION = 21;
export const MIN_SUPPORTED_QE_SERVER_VERSION = '7.0';
export const OP_REPLY = 1;
export const OP_UPDATE = 2001;
export const OP_INSERT = 2002;
export const OP_QUERY = 2004;
export const OP_DELETE = 2006;
export const OP_COMPRESSED = 2012;
export const OP_MSG = 2013;

View file

@@ -0,0 +1,135 @@
import { type EventEmitter } from 'events';
import { type Abortable } from '../../mongo_types';
import { type TimeoutContext } from '../../timeout';
import { addAbortListener, kDispose, List, promiseWithResolvers } from '../../utils';
/**
* @internal
* An object holding references to a promise's resolve and reject functions.
*/
type PendingPromises = Omit<
ReturnType<typeof promiseWithResolvers<IteratorResult<Buffer>>>,
'promise'
>;
/**
* onData is adapted from Node.js' events.on helper
* https://nodejs.org/api/events.html#eventsonemitter-eventname-options
*
* Returns an AsyncIterator that iterates each 'data' event emitted from emitter.
* It will reject upon an error event.
*/
export function onData(
emitter: EventEmitter,
{ timeoutContext, signal }: { timeoutContext?: TimeoutContext } & Abortable
) {
signal?.throwIfAborted();
// Setup pending events and pending promise lists
/**
* When the caller has not yet called .next(), we store the
* value from the event in this list. Next time they call .next()
* we pull the first value out of this list and resolve a promise with it.
*/
const unconsumedEvents = new List<Buffer>();
/**
* When there has not yet been an event, a new promise will be created
* and implicitly stored in this list. When an event occurs we take the first
* promise in this list and resolve it.
*/
const unconsumedPromises = new List<PendingPromises>();
/**
   * Stores an error created by an error event.
   * This error will turn into a rejection for the subsequent .next() call.
*/
let error: Error | null = null;
/** Set to true only after event listeners have been removed. */
let finished = false;
const iterator: AsyncGenerator<Buffer> = {
next() {
// First, we consume all unread events
const value = unconsumedEvents.shift();
if (value != null) {
return Promise.resolve({ value, done: false });
}
// Then we error, if an error happened
// This happens one time if at all, because after 'error'
// we stop listening
if (error != null) {
const p = Promise.reject(error);
// Only the first element errors
error = null;
return p;
}
// If the iterator is finished, resolve to done
if (finished) return closeHandler();
// Wait until an event happens
const { promise, resolve, reject } = promiseWithResolvers<IteratorResult<Buffer>>();
unconsumedPromises.push({ resolve, reject });
return promise;
},
return() {
return closeHandler();
},
throw(err: Error) {
errorHandler(err);
return Promise.resolve({ value: undefined, done: true });
},
[Symbol.asyncIterator]() {
return this;
}
};
// Adding event handlers
emitter.on('data', eventHandler);
emitter.on('error', errorHandler);
const abortListener = addAbortListener(signal, function () {
errorHandler(this.reason);
});
const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead;
timeoutForSocketRead?.throwIfExpired();
timeoutForSocketRead?.then(undefined, errorHandler);
return iterator;
function eventHandler(value: Buffer) {
const promise = unconsumedPromises.shift();
if (promise != null) promise.resolve({ value, done: false });
else unconsumedEvents.push(value);
}
function errorHandler(err: Error) {
const promise = unconsumedPromises.shift();
if (promise != null) promise.reject(err);
else error = err;
void closeHandler();
}
function closeHandler() {
    // Remove the event handlers
emitter.off('data', eventHandler);
emitter.off('error', errorHandler);
abortListener?.[kDispose]();
finished = true;
timeoutForSocketRead?.clear();
const doneResult = { value: undefined, done: finished } as const;
for (const promise of unconsumedPromises) {
promise.resolve(doneResult);
}
return Promise.resolve(doneResult);
}
}
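/*
 * A minimal sketch of consuming 'data' events through the iterator above,
 * assuming a plain EventEmitter as the source; the import path is
 * illustrative. Each emitted Buffer resolves the oldest pending .next() call,
 * or is queued until one arrives.
 *
 * ```ts
 * import { EventEmitter } from 'events';
 * import { onData } from './on_data'; // illustrative path
 *
 * const source = new EventEmitter();
 * const chunks = onData(source, {});
 *
 * setImmediate(() => {
 *   source.emit('data', Buffer.from('first'));
 *   source.emit('data', Buffer.from('second'));
 * });
 *
 * console.log((await chunks.next()).value.toString()); // 'first'
 * console.log((await chunks.next()).value.toString()); // 'second'
 * await chunks.return(undefined); // removes the listeners
 * ```
 */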

View file

@@ -0,0 +1,353 @@
import {
Binary,
type BSONElement,
BSONError,
BSONType,
deserialize,
type DeserializeOptions,
getBigInt64LE,
getFloat64LE,
getInt32LE,
ObjectId,
parseToElementsToArray,
Timestamp,
toUTF8
} from '../../../bson';
// eslint-disable-next-line no-restricted-syntax
const enum BSONElementOffset {
type = 0,
nameOffset = 1,
nameLength = 2,
offset = 3,
length = 4
}
/** @internal */
export type JSTypeOf = {
[BSONType.null]: null;
[BSONType.undefined]: null;
[BSONType.double]: number;
[BSONType.int]: number;
[BSONType.long]: bigint;
[BSONType.timestamp]: Timestamp;
[BSONType.binData]: Binary;
[BSONType.bool]: boolean;
[BSONType.objectId]: ObjectId;
[BSONType.string]: string;
[BSONType.date]: Date;
[BSONType.object]: OnDemandDocument;
[BSONType.array]: OnDemandDocument;
};
/** @internal */
type CachedBSONElement = { element: BSONElement; value: any | undefined };
/**
* @internal
*
 * Options for `OnDemandDocument.toObject()`. The `validation` option is made
 * required so that callers must explicitly choose utf8 validation behavior. */
export type OnDemandDocumentDeserializeOptions = Omit<DeserializeOptions, 'validation'> &
Required<Pick<DeserializeOptions, 'validation'>>;
/** @internal */
export class OnDemandDocument {
/**
   * Maps JS strings to elements and jsValues to speed up subsequent lookups.
   * - If `false`, the name does not exist in the BSON document
   * - If a `CachedBSONElement` instance, the name exists
   * - If `cache[name].value == null`, the jsValue has not yet been parsed
   * - Null/Undefined values do not get cached because they are zero-length values.
*/
private readonly cache: Record<string, CachedBSONElement | false | undefined> =
Object.create(null);
/** Caches the index of elements that have been named */
private readonly indexFound: Record<number, boolean> = Object.create(null);
/** All bson elements in this document */
private readonly elements: ReadonlyArray<BSONElement>;
constructor(
/** BSON bytes, this document begins at offset */
protected readonly bson: Uint8Array,
/** The start of the document */
private readonly offset = 0,
/** If this is an embedded document, indicates if this was a BSON array */
public readonly isArray = false,
/** If elements was already calculated */
elements?: BSONElement[]
) {
this.elements = elements ?? parseToElementsToArray(this.bson, offset);
}
/** Only supports basic latin strings */
private isElementName(name: string, element: BSONElement): boolean {
const nameLength = element[BSONElementOffset.nameLength];
const nameOffset = element[BSONElementOffset.nameOffset];
if (name.length !== nameLength) return false;
const nameEnd = nameOffset + nameLength;
for (
let byteIndex = nameOffset, charIndex = 0;
charIndex < name.length && byteIndex < nameEnd;
charIndex++, byteIndex++
) {
if (this.bson[byteIndex] !== name.charCodeAt(charIndex)) return false;
}
return true;
}
/**
* Seeks into the elements array for an element matching the given name.
*
* @remarks
* Caching:
* - Caches the existence of a property making subsequent look ups for non-existent properties return immediately
* - Caches names mapped to elements to avoid reiterating the array and comparing the name again
* - Caches the index at which an element has been found to prevent rechecking against elements already determined to belong to another name
*
* @param name - a basic latin string name of a BSON element
* @returns
*/
private getElement(name: string | number): CachedBSONElement | null {
const cachedElement = this.cache[name];
if (cachedElement === false) return null;
if (cachedElement != null) {
return cachedElement;
}
if (typeof name === 'number') {
if (this.isArray) {
if (name < this.elements.length) {
const element = this.elements[name];
const cachedElement = { element, value: undefined };
this.cache[name] = cachedElement;
this.indexFound[name] = true;
return cachedElement;
} else {
return null;
}
} else {
return null;
}
}
for (let index = 0; index < this.elements.length; index++) {
const element = this.elements[index];
// skip this element if it has already been associated with a name
if (!(index in this.indexFound) && this.isElementName(name, element)) {
const cachedElement = { element, value: undefined };
this.cache[name] = cachedElement;
this.indexFound[index] = true;
return cachedElement;
}
}
this.cache[name] = false;
return null;
}
/**
   * Translates BSON bytes into a javascript value. After checking `as` against the BSON
   * element's type, this method returns one of the small subset of BSON types that the
   * driver needs to function.
*
* @remarks
* - BSONType.null and BSONType.undefined always return null
* - If the type requested does not match this returns null
*
* @param element - The element to revive to a javascript value
* @param as - A type byte expected to be returned
*/
private toJSValue<T extends keyof JSTypeOf>(element: BSONElement, as: T): JSTypeOf[T];
private toJSValue(element: BSONElement, as: keyof JSTypeOf): any {
const type = element[BSONElementOffset.type];
const offset = element[BSONElementOffset.offset];
const length = element[BSONElementOffset.length];
if (as !== type) {
return null;
}
switch (as) {
case BSONType.null:
case BSONType.undefined:
return null;
case BSONType.double:
return getFloat64LE(this.bson, offset);
case BSONType.int:
return getInt32LE(this.bson, offset);
case BSONType.long:
return getBigInt64LE(this.bson, offset);
case BSONType.bool:
return Boolean(this.bson[offset]);
case BSONType.objectId:
return new ObjectId(this.bson.subarray(offset, offset + 12));
case BSONType.timestamp:
return new Timestamp(getBigInt64LE(this.bson, offset));
case BSONType.string:
return toUTF8(this.bson, offset + 4, offset + length - 1, false);
case BSONType.binData: {
const totalBinarySize = getInt32LE(this.bson, offset);
const subType = this.bson[offset + 4];
if (subType === 2) {
const subType2BinarySize = getInt32LE(this.bson, offset + 1 + 4);
if (subType2BinarySize < 0)
throw new BSONError('Negative binary type element size found for subtype 0x02');
if (subType2BinarySize > totalBinarySize - 4)
throw new BSONError('Binary type with subtype 0x02 contains too long binary size');
if (subType2BinarySize < totalBinarySize - 4)
throw new BSONError('Binary type with subtype 0x02 contains too short binary size');
return new Binary(
this.bson.subarray(offset + 1 + 4 + 4, offset + 1 + 4 + 4 + subType2BinarySize),
2
);
}
return new Binary(
this.bson.subarray(offset + 1 + 4, offset + 1 + 4 + totalBinarySize),
subType
);
}
case BSONType.date:
// Pretend this is correct.
return new Date(Number(getBigInt64LE(this.bson, offset)));
case BSONType.object:
return new OnDemandDocument(this.bson, offset);
case BSONType.array:
return new OnDemandDocument(this.bson, offset, true);
default:
throw new BSONError(`Unsupported BSON type: ${as}`);
}
}
/**
* Returns the number of elements in this BSON document
*/
public size() {
return this.elements.length;
}
/**
* Checks for the existence of an element by name.
*
* @remarks
   * Uses `getElement` with the expectation that it will populate caches such that a `has` call
* followed by a `getElement` call will not repeat the cost paid by the first look up.
*
* @param name - element name
*/
public has(name: string): boolean {
const cachedElement = this.cache[name];
if (cachedElement === false) return false;
if (cachedElement != null) return true;
return this.getElement(name) != null;
}
/**
* Turns BSON element with `name` into a javascript value.
*
* @typeParam T - must be one of the supported BSON types determined by `JSTypeOf` this will determine the return type of this function.
* @param name - the element name
* @param as - the bson type expected
* @param required - whether or not the element is expected to exist, if true this function will throw if it is not present
*/
public get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: boolean | undefined
): JSTypeOf[T] | null;
/** `required` will make `get` throw if name does not exist or is null/undefined */
public get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required: true
): JSTypeOf[T];
public get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: boolean
): JSTypeOf[T] | null {
const element = this.getElement(name);
if (element == null) {
if (required === true) {
throw new BSONError(`BSON element "${name}" is missing`);
} else {
return null;
}
}
if (element.value == null) {
const value = this.toJSValue(element.element, as);
if (value == null) {
if (required === true) {
throw new BSONError(`BSON element "${name}" is missing`);
} else {
return null;
}
}
// It is important to never store null
element.value = value;
}
return element.value;
}
/**
* Supports returning int, double, long, and bool as javascript numbers
*
* @remarks
* **NOTE:**
* - Use this _only_ when you believe the potential precision loss of an int64 is acceptable
* - This method does not cache the result as Longs or booleans would be stored incorrectly
*
* @param name - element name
* @param required - throws if name does not exist
*/
public getNumber<const Req extends boolean = false>(
name: string,
required?: Req
): Req extends true ? number : number | null;
public getNumber(name: string, required: boolean): number | null {
const maybeBool = this.get(name, BSONType.bool);
const bool = maybeBool == null ? null : maybeBool ? 1 : 0;
const maybeLong = this.get(name, BSONType.long);
const long = maybeLong == null ? null : Number(maybeLong);
const result = bool ?? long ?? this.get(name, BSONType.int) ?? this.get(name, BSONType.double);
if (required === true && result == null) {
throw new BSONError(`BSON element "${name}" is missing`);
}
return result;
}
/**
* Deserialize this object, DOES NOT cache result so avoid multiple invocations
* @param options - BSON deserialization options
*/
public toObject(options?: OnDemandDocumentDeserializeOptions): Record<string, any> {
return deserialize(this.bson, {
...options,
index: this.offset,
allowObjectSmallerThanBufferSize: true
});
}
/** Returns this document's bytes only */
toBytes() {
const size = getInt32LE(this.bson, this.offset);
return this.bson.subarray(this.offset, this.offset + size);
}
}
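/*
 * A minimal sketch of lazy field access with OnDemandDocument. The bytes come
 * from an ordinary BSON serialize call, only the fields that are requested are
 * revived into JS values, and results are cached per name; the import paths
 * are illustrative.
 *
 * ```ts
 * import { BSONType, serialize } from 'bson';
 * import { OnDemandDocument } from './document'; // illustrative path
 *
 * const bytes = serialize({ ok: 1, cursor: { id: 0n, firstBatch: [] } });
 * const doc = new OnDemandDocument(bytes);
 *
 * doc.has('cursor'); // true, and caches the element lookup
 * const cursor = doc.get('cursor', BSONType.object, true); // nested OnDemandDocument
 * cursor.get('id', BSONType.long, true); // 0n, read directly from the bytes
 * doc.getNumber('ok'); // 1
 * doc.get('missing', BSONType.string); // null
 * ```
 */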

View file

@@ -0,0 +1,393 @@
import {
type BSONElement,
type BSONSerializeOptions,
BSONType,
type DeserializeOptions,
type Document,
Long,
parseToElementsToArray,
parseUtf8ValidationOption,
pluckBSONSerializeOptions,
serialize,
type Timestamp
} from '../../bson';
import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error';
import { type ClusterTime } from '../../sdam/common';
import { decorateDecryptionResult, ns } from '../../utils';
import {
type JSTypeOf,
OnDemandDocument,
type OnDemandDocumentDeserializeOptions
} from './on_demand/document';
// eslint-disable-next-line no-restricted-syntax
const enum BSONElementOffset {
type = 0,
nameOffset = 1,
nameLength = 2,
offset = 3,
length = 4
}
/**
 * Accepts a BSON payload and checks for an "ok: 0" element.
* This utility is intended to prevent calling response class constructors
* that expect the result to be a success and demand certain properties to exist.
*
* For example, a cursor response always expects a cursor embedded document.
* In order to write the class such that the properties reflect that assertion (non-null)
* we cannot invoke the subclass constructor if the BSON represents an error.
*
* @param bytes - BSON document returned from the server
*/
export function isErrorResponse(bson: Uint8Array, elements: BSONElement[]): boolean {
for (let eIdx = 0; eIdx < elements.length; eIdx++) {
const element = elements[eIdx];
if (element[BSONElementOffset.nameLength] === 2) {
const nameOffset = element[BSONElementOffset.nameOffset];
// 111 == "o", 107 == "k"
if (bson[nameOffset] === 111 && bson[nameOffset + 1] === 107) {
const valueOffset = element[BSONElementOffset.offset];
const valueLength = element[BSONElementOffset.length];
// If any byte in the length of the ok number (works for any type) is non zero,
// then it is considered "ok: 1"
for (let i = valueOffset; i < valueOffset + valueLength; i++) {
if (bson[i] !== 0x00) return false;
}
return true;
}
}
}
return true;
}
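/*
 * A minimal sketch of the "ok" scan above, reusing the same driver-internal
 * bson re-exports imported at the top of this file. A document whose ok value
 * has any non-zero byte is treated as a success; ok: 0 (or a missing ok field)
 * is treated as a potential error.
 *
 * ```ts
 * import { parseToElementsToArray, serialize } from '../../bson';
 *
 * const okBytes = serialize({ ok: 1, n: 1 });
 * isErrorResponse(okBytes, parseToElementsToArray(okBytes, 0)); // false
 *
 * const errBytes = serialize({ ok: 0, code: 50, errmsg: 'MaxTimeMSExpired' });
 * isErrorResponse(errBytes, parseToElementsToArray(errBytes, 0)); // true
 * ```
 */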
/** @internal */
export type MongoDBResponseConstructor = {
new (bson: Uint8Array, offset?: number, isArray?: boolean): MongoDBResponse;
make(bson: Uint8Array): MongoDBResponse;
};
/** @internal */
export class MongoDBResponse extends OnDemandDocument {
// Wrap error thrown from BSON
public override get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: false | undefined
): JSTypeOf[T] | null;
public override get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required: true
): JSTypeOf[T];
public override get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: boolean | undefined
): JSTypeOf[T] | null {
try {
return super.get(name, as, required);
} catch (cause) {
throw new MongoUnexpectedServerResponseError(cause.message, { cause });
}
}
static is(value: unknown): value is MongoDBResponse {
return value instanceof MongoDBResponse;
}
static make(bson: Uint8Array) {
const elements = parseToElementsToArray(bson, 0);
const isError = isErrorResponse(bson, elements);
return isError
? new MongoDBResponse(bson, 0, false, elements)
: new this(bson, 0, false, elements);
}
// {ok:1}
static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0]));
/**
* Returns true iff:
* - ok is 0 and the top-level code === 50
* - ok is 1 and the writeErrors array contains a code === 50
   * - ok is 1 and the writeConcernError object contains a code === 50
*/
get isMaxTimeExpiredError() {
// {ok: 0, code: 50 ... }
const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired;
if (isTopLevel) return true;
if (this.ok === 0) return false;
// {ok: 1, writeConcernError: {code: 50 ... }}
const isWriteConcern =
this.get('writeConcernError', BSONType.object)?.getNumber('code') ===
MONGODB_ERROR_CODES.MaxTimeMSExpired;
if (isWriteConcern) return true;
const writeErrors = this.get('writeErrors', BSONType.array);
if (writeErrors?.size()) {
for (let i = 0; i < writeErrors.size(); i++) {
const isWriteError =
writeErrors.get(i, BSONType.object)?.getNumber('code') ===
MONGODB_ERROR_CODES.MaxTimeMSExpired;
// {ok: 1, writeErrors: [{code: 50 ... }]}
if (isWriteError) return true;
}
}
return false;
}
/**
* Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the
* contents of the document.
*/
get recoveryToken(): Document | null {
return (
this.get('recoveryToken', BSONType.object)?.toObject({
promoteValues: false,
promoteLongs: false,
promoteBuffers: false,
validation: { utf8: true }
}) ?? null
);
}
/**
* The server creates a cursor in response to a snapshot find/aggregate command and reports atClusterTime within the cursor field in the response.
* For the distinct command the server adds a top-level atClusterTime field to the response.
* The atClusterTime field represents the timestamp of the read and is guaranteed to be majority committed.
*/
public get atClusterTime(): Timestamp | null {
return (
this.get('cursor', BSONType.object)?.get('atClusterTime', BSONType.timestamp) ??
this.get('atClusterTime', BSONType.timestamp)
);
}
public get operationTime(): Timestamp | null {
return this.get('operationTime', BSONType.timestamp);
}
/** Normalizes whatever BSON value is "ok" to a JS number 1 or 0. */
public get ok(): 0 | 1 {
return this.getNumber('ok') ? 1 : 0;
}
public get $err(): string | null {
return this.get('$err', BSONType.string);
}
public get errmsg(): string | null {
return this.get('errmsg', BSONType.string);
}
public get code(): number | null {
return this.getNumber('code');
}
private clusterTime?: ClusterTime | null;
public get $clusterTime(): ClusterTime | null {
if (!('clusterTime' in this)) {
const clusterTimeDoc = this.get('$clusterTime', BSONType.object);
if (clusterTimeDoc == null) {
this.clusterTime = null;
return null;
}
const clusterTime = clusterTimeDoc.get('clusterTime', BSONType.timestamp, true);
const signature = clusterTimeDoc.get('signature', BSONType.object)?.toObject();
// @ts-expect-error: `signature` is incorrectly typed. It is public API.
this.clusterTime = { clusterTime, signature };
}
return this.clusterTime ?? null;
}
public override toObject(options?: BSONSerializeOptions): Record<string, any> {
const exactBSONOptions = {
...pluckBSONSerializeOptions(options ?? {}),
validation: parseUtf8ValidationOption(options)
};
return super.toObject(exactBSONOptions);
}
}
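// Illustrative sketch (not part of the upstream driver): building a MongoDBResponse from raw BSON
// with the static factory above and reading a few lazily-parsed fields. The payload is invented.
//
//   const response = MongoDBResponse.make(serialize({ ok: 1 }));
//   response.ok;         // 1
//   response.errmsg;     // null - no errmsg element in the payload
//   response.toObject(); // { ok: 1 } as a fully deserialized Document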
/** @internal */
export class CursorResponse extends MongoDBResponse {
/**
* Devtools need to know which keys were encrypted before the driver automatically decrypted them.
* If decorating is enabled (`Symbol.for('@@mdb.decorateDecryptionResult')`), this field will be set,
* storing the original encrypted response from the server, so that we can build an object that has
* the list of BSON keys that were encrypted stored at a well known symbol: `Symbol.for('@@mdb.decryptedKeys')`.
*/
encryptedResponse?: MongoDBResponse;
/**
* This supports a feature of the FindCursor.
* It is an optimization to avoid an extra getMore when the limit has been reached
*/
static get emptyGetMore(): CursorResponse {
return new CursorResponse(serialize({ ok: 1, cursor: { id: 0n, nextBatch: [] } }));
}
static override is(value: unknown): value is CursorResponse {
return value instanceof CursorResponse || value === CursorResponse.emptyGetMore;
}
private _batch: OnDemandDocument | null = null;
private iterated = 0;
get cursor() {
return this.get('cursor', BSONType.object, true);
}
public get id(): Long {
try {
return Long.fromBigInt(this.cursor.get('id', BSONType.long, true));
} catch (cause) {
throw new MongoUnexpectedServerResponseError(cause.message, { cause });
}
}
public get ns() {
const namespace = this.cursor.get('ns', BSONType.string);
if (namespace != null) return ns(namespace);
return null;
}
public get length() {
return Math.max(this.batchSize - this.iterated, 0);
}
private _encryptedBatch: OnDemandDocument | null = null;
get encryptedBatch() {
if (this.encryptedResponse == null) return null;
if (this._encryptedBatch != null) return this._encryptedBatch;
const cursor = this.encryptedResponse?.get('cursor', BSONType.object);
if (cursor?.has('firstBatch'))
this._encryptedBatch = cursor.get('firstBatch', BSONType.array, true);
else if (cursor?.has('nextBatch'))
this._encryptedBatch = cursor.get('nextBatch', BSONType.array, true);
else throw new MongoUnexpectedServerResponseError('Cursor document did not contain a batch');
return this._encryptedBatch;
}
private get batch() {
if (this._batch != null) return this._batch;
const cursor = this.cursor;
if (cursor.has('firstBatch')) this._batch = cursor.get('firstBatch', BSONType.array, true);
else if (cursor.has('nextBatch')) this._batch = cursor.get('nextBatch', BSONType.array, true);
else throw new MongoUnexpectedServerResponseError('Cursor document did not contain a batch');
return this._batch;
}
public get batchSize() {
return this.batch?.size();
}
public get postBatchResumeToken() {
return (
this.cursor.get('postBatchResumeToken', BSONType.object)?.toObject({
promoteValues: false,
promoteLongs: false,
promoteBuffers: false,
validation: { utf8: true }
}) ?? null
);
}
public shift(options: OnDemandDocumentDeserializeOptions): any {
if (this.iterated >= this.batchSize) {
return null;
}
const result = this.batch.get(this.iterated, BSONType.object, true) ?? null;
const encryptedResult = this.encryptedBatch?.get(this.iterated, BSONType.object, true) ?? null;
this.iterated += 1;
if (options?.raw) {
return result.toBytes();
} else {
const object = result.toObject(options);
if (encryptedResult) {
decorateDecryptionResult(object, encryptedResult.toObject(options), true);
}
return object;
}
}
public clear() {
this.iterated = this.batchSize;
}
}
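// Illustrative sketch (not part of the upstream driver): draining a CursorResponse one document
// at a time with shift(). The wire payload below is invented.
//
//   const response = new CursorResponse(
//     serialize({ ok: 1, cursor: { id: 0n, ns: 'db.coll', firstBatch: [{ _id: 1 }, { _id: 2 }] } })
//   );
//   response.batchSize; // 2
//   response.shift({}); // { _id: 1 }
//   response.shift({}); // { _id: 2 }
//   response.shift({}); // null - the batch is exhausted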
/**
 * Explain responses have nothing to do with cursor responses.
 * This class serves to temporarily avoid refactoring how cursors handle
 * explain responses, which is to detect that the response is not cursor-like and return the explain
 * result as the "first and only" document in the "batch" and end the "cursor".
*/
export class ExplainedCursorResponse extends CursorResponse {
isExplain = true;
override get id(): Long {
return Long.fromBigInt(0n);
}
override get batchSize() {
return 0;
}
override get ns() {
return null;
}
_length = 1;
override get length(): number {
return this._length;
}
override shift(options?: DeserializeOptions) {
if (this._length === 0) return null;
this._length -= 1;
return this.toObject(options);
}
}
/**
* Client bulk writes have some extra metadata at the top level that needs to be
* included in the result returned to the user.
*/
export class ClientBulkWriteCursorResponse extends CursorResponse {
get insertedCount() {
return this.get('nInserted', BSONType.int, true);
}
get upsertedCount() {
return this.get('nUpserted', BSONType.int, true);
}
get matchedCount() {
return this.get('nMatched', BSONType.int, true);
}
get modifiedCount() {
return this.get('nModified', BSONType.int, true);
}
get deletedCount() {
return this.get('nDeleted', BSONType.int, true);
}
get writeConcernError() {
return this.get('writeConcernError', BSONType.object, false);
}
}

View file

@ -0,0 +1,48 @@
import { MongoInvalidArgumentError } from '../../error';
import { ReadPreference, type ReadPreferenceLike } from '../../read_preference';
import { ServerType } from '../../sdam/common';
import type { Server } from '../../sdam/server';
import type { ServerDescription } from '../../sdam/server_description';
import type { Topology } from '../../sdam/topology';
import { TopologyDescription } from '../../sdam/topology_description';
import type { Connection } from '../connection';
export interface ReadPreferenceOption {
readPreference?: ReadPreferenceLike;
}
export function getReadPreference(options?: ReadPreferenceOption): ReadPreference {
// Default to command version of the readPreference.
let readPreference = options?.readPreference ?? ReadPreference.primary;
if (typeof readPreference === 'string') {
readPreference = ReadPreference.fromString(readPreference);
}
if (!(readPreference instanceof ReadPreference)) {
throw new MongoInvalidArgumentError(
'Option "readPreference" must be a ReadPreference instance'
);
}
return readPreference;
}
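// Illustrative sketch (not part of the upstream driver): getReadPreference accepts a mode string
// or a ReadPreference instance and always returns an instance, defaulting to primary.
//
//   getReadPreference();                                           // ReadPreference.primary
//   getReadPreference({ readPreference: 'secondaryPreferred' });   // instance built via fromString
//   getReadPreference({ readPreference: ReadPreference.nearest }); // returned as-is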
export function isSharded(topologyOrServer?: Topology | Server | Connection): boolean {
if (topologyOrServer == null) {
return false;
}
if (topologyOrServer.description && topologyOrServer.description.type === ServerType.Mongos) {
return true;
}
// NOTE: This is incredibly inefficient, and should be removed once command construction
// happens based on `Server` not `Topology`.
if (topologyOrServer.description && topologyOrServer.description instanceof TopologyDescription) {
const servers: ServerDescription[] = Array.from(topologyOrServer.description.servers.values());
return servers.some((server: ServerDescription) => server.type === ServerType.Mongos);
}
return false;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,177 @@
/* eslint-disable @typescript-eslint/no-unnecessary-type-assertion */
export const SYSTEM_NAMESPACE_COLLECTION = 'system.namespaces';
export const SYSTEM_INDEX_COLLECTION = 'system.indexes';
export const SYSTEM_PROFILE_COLLECTION = 'system.profile';
export const SYSTEM_USER_COLLECTION = 'system.users';
export const SYSTEM_COMMAND_COLLECTION = '$cmd';
export const SYSTEM_JS_COLLECTION = 'system.js';
// events
export const ERROR = 'error' as const;
export const TIMEOUT = 'timeout' as const;
export const CLOSE = 'close' as const;
export const OPEN = 'open' as const;
export const CONNECT = 'connect' as const;
export const CLOSED = 'closed' as const;
export const ENDED = 'ended' as const;
export const MESSAGE = 'message' as const;
export const PINNED = 'pinned' as const;
export const UNPINNED = 'unpinned' as const;
export const DESCRIPTION_RECEIVED = 'descriptionReceived';
/** @internal */
export const SERVER_OPENING = 'serverOpening' as const;
/** @internal */
export const SERVER_CLOSED = 'serverClosed' as const;
/** @internal */
export const SERVER_DESCRIPTION_CHANGED = 'serverDescriptionChanged' as const;
/** @internal */
export const TOPOLOGY_OPENING = 'topologyOpening' as const;
/** @internal */
export const TOPOLOGY_CLOSED = 'topologyClosed' as const;
/** @internal */
export const TOPOLOGY_DESCRIPTION_CHANGED = 'topologyDescriptionChanged' as const;
/** @internal */
export const SERVER_SELECTION_STARTED = 'serverSelectionStarted' as const;
/** @internal */
export const SERVER_SELECTION_FAILED = 'serverSelectionFailed' as const;
/** @internal */
export const SERVER_SELECTION_SUCCEEDED = 'serverSelectionSucceeded' as const;
/** @internal */
export const WAITING_FOR_SUITABLE_SERVER = 'waitingForSuitableServer' as const;
/** @internal */
export const CONNECTION_POOL_CREATED = 'connectionPoolCreated' as const;
/** @internal */
export const CONNECTION_POOL_CLOSED = 'connectionPoolClosed' as const;
/** @internal */
export const CONNECTION_POOL_CLEARED = 'connectionPoolCleared' as const;
/** @internal */
export const CONNECTION_POOL_READY = 'connectionPoolReady' as const;
/** @internal */
export const CONNECTION_CREATED = 'connectionCreated' as const;
/** @internal */
export const CONNECTION_READY = 'connectionReady' as const;
/** @internal */
export const CONNECTION_CLOSED = 'connectionClosed' as const;
/** @internal */
export const CONNECTION_CHECK_OUT_STARTED = 'connectionCheckOutStarted' as const;
/** @internal */
export const CONNECTION_CHECK_OUT_FAILED = 'connectionCheckOutFailed' as const;
/** @internal */
export const CONNECTION_CHECKED_OUT = 'connectionCheckedOut' as const;
/** @internal */
export const CONNECTION_CHECKED_IN = 'connectionCheckedIn' as const;
export const CLUSTER_TIME_RECEIVED = 'clusterTimeReceived' as const;
/** @internal */
export const COMMAND_STARTED = 'commandStarted' as const;
/** @internal */
export const COMMAND_SUCCEEDED = 'commandSucceeded' as const;
/** @internal */
export const COMMAND_FAILED = 'commandFailed' as const;
/** @internal */
export const SERVER_HEARTBEAT_STARTED = 'serverHeartbeatStarted' as const;
/** @internal */
export const SERVER_HEARTBEAT_SUCCEEDED = 'serverHeartbeatSucceeded' as const;
/** @internal */
export const SERVER_HEARTBEAT_FAILED = 'serverHeartbeatFailed' as const;
export const RESPONSE = 'response' as const;
export const MORE = 'more' as const;
export const INIT = 'init' as const;
export const CHANGE = 'change' as const;
export const END = 'end' as const;
export const RESUME_TOKEN_CHANGED = 'resumeTokenChanged' as const;
/** @public */
export const HEARTBEAT_EVENTS = Object.freeze([
SERVER_HEARTBEAT_STARTED,
SERVER_HEARTBEAT_SUCCEEDED,
SERVER_HEARTBEAT_FAILED
] as const);
/** @public */
export const CMAP_EVENTS = Object.freeze([
CONNECTION_POOL_CREATED,
CONNECTION_POOL_READY,
CONNECTION_POOL_CLEARED,
CONNECTION_POOL_CLOSED,
CONNECTION_CREATED,
CONNECTION_READY,
CONNECTION_CLOSED,
CONNECTION_CHECK_OUT_STARTED,
CONNECTION_CHECK_OUT_FAILED,
CONNECTION_CHECKED_OUT,
CONNECTION_CHECKED_IN
] as const);
/** @public */
export const TOPOLOGY_EVENTS = Object.freeze([
SERVER_OPENING,
SERVER_CLOSED,
SERVER_DESCRIPTION_CHANGED,
TOPOLOGY_OPENING,
TOPOLOGY_CLOSED,
TOPOLOGY_DESCRIPTION_CHANGED,
ERROR,
TIMEOUT,
CLOSE
] as const);
/** @public */
export const APM_EVENTS = Object.freeze([
COMMAND_STARTED,
COMMAND_SUCCEEDED,
COMMAND_FAILED
] as const);
/**
* All events that we relay to the `Topology`
* @internal
*/
export const SERVER_RELAY_EVENTS = Object.freeze([
SERVER_HEARTBEAT_STARTED,
SERVER_HEARTBEAT_SUCCEEDED,
SERVER_HEARTBEAT_FAILED,
COMMAND_STARTED,
COMMAND_SUCCEEDED,
COMMAND_FAILED,
...CMAP_EVENTS
] as const);
/**
* All events we listen to from `Server` instances, but do not forward to the client
* @internal
*/
export const LOCAL_SERVER_EVENTS = Object.freeze([
CONNECT,
DESCRIPTION_RECEIVED,
CLOSED,
ENDED
] as const);
/** @public */
export const MONGO_CLIENT_EVENTS = Object.freeze([
...CMAP_EVENTS,
...APM_EVENTS,
...TOPOLOGY_EVENTS,
...HEARTBEAT_EVENTS
] as const);
/**
* @internal
* The legacy hello command that was deprecated in MongoDB 5.0.
*/
export const LEGACY_HELLO_COMMAND = 'ismaster';
/**
* @internal
* The legacy hello command that was deprecated in MongoDB 5.0.
*/
export const LEGACY_HELLO_COMMAND_CAMEL_CASE = 'isMaster';
// Typescript errors if we index objects with `Symbol.for(...)`, so
// to avoid TS errors we pull them out into variables. Then we can type
// the objects (and class) that we expect to see them on and prevent TS
// errors.
/** @internal */
export const kDecorateResult = Symbol.for('@@mdb.decorateDecryptionResult');
/** @internal */
export const kDecoratedKeys = Symbol.for('@@mdb.decryptedKeys');
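// Illustrative sketch (not part of the upstream driver): the frozen event-name arrays above can
// drive generic listener registration; `client` is assumed to be a MongoClient instance.
//
//   for (const event of MONGO_CLIENT_EVENTS) {
//     client.on(event, ev => console.log(event, ev));
//   }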

File diff suppressed because it is too large

View file

@ -0,0 +1,245 @@
import type { Document } from '../bson';
import { MongoAPIError } from '../error';
import {
Explain,
ExplainableCursor,
type ExplainCommandOptions,
type ExplainVerbosityLike,
validateExplainTimeoutOptions
} from '../explain';
import type { MongoClient } from '../mongo_client';
import { type Abortable } from '../mongo_types';
import { AggregateOperation, type AggregateOptions } from '../operations/aggregate';
import { executeOperation } from '../operations/execute_operation';
import type { ClientSession } from '../sessions';
import type { Sort } from '../sort';
import { mergeOptions, type MongoDBNamespace } from '../utils';
import {
type AbstractCursorOptions,
CursorTimeoutMode,
type InitialCursorResponse
} from './abstract_cursor';
/** @public */
export interface AggregationCursorOptions extends AbstractCursorOptions, AggregateOptions {}
/**
* The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB
* allowing for iteration over the results returned from the underlying query. It supports
 * one-by-one document iteration, conversion to an array, or iteration as a Node.js stream.
* @public
*/
export class AggregationCursor<TSchema = any> extends ExplainableCursor<TSchema> {
public readonly pipeline: Document[];
/** @internal */
private aggregateOptions: AggregateOptions & Abortable;
/** @internal */
constructor(
client: MongoClient,
namespace: MongoDBNamespace,
pipeline: Document[] = [],
options: AggregateOptions & Abortable = {}
) {
super(client, namespace, options);
this.pipeline = pipeline;
this.aggregateOptions = options;
const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1];
if (
this.cursorOptions.timeoutMS != null &&
this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION &&
(lastStage?.$merge != null || lastStage?.$out != null)
)
throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode');
}
clone(): AggregationCursor<TSchema> {
const clonedOptions = mergeOptions({}, this.aggregateOptions);
delete clonedOptions.session;
return new AggregationCursor(this.client, this.namespace, this.pipeline, {
...clonedOptions
});
}
override map<T>(transform: (doc: TSchema) => T): AggregationCursor<T> {
return super.map(transform) as AggregationCursor<T>;
}
/** @internal */
async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
const options = {
...this.aggregateOptions,
...this.cursorOptions,
session,
signal: this.signal
};
if (options.explain) {
try {
validateExplainTimeoutOptions(options, Explain.fromOptions(options));
} catch {
throw new MongoAPIError(
'timeoutMS cannot be used with explain when explain is specified in aggregateOptions'
);
}
}
const aggregateOperation = new AggregateOperation(this.namespace, this.pipeline, options);
const response = await executeOperation(this.client, aggregateOperation, this.timeoutContext);
return { server: aggregateOperation.server, session, response };
}
/** Execute the explain for the cursor */
async explain(): Promise<Document>;
async explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise<Document>;
async explain(options: { timeoutMS?: number }): Promise<Document>;
async explain(
verbosity: ExplainVerbosityLike | ExplainCommandOptions,
options: { timeoutMS?: number }
): Promise<Document>;
async explain(
verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number },
options?: { timeoutMS?: number }
): Promise<Document> {
const { explain, timeout } = this.resolveExplainTimeoutOptions(verbosity, options);
return (
await executeOperation(
this.client,
new AggregateOperation(this.namespace, this.pipeline, {
...this.aggregateOptions, // NOTE: order matters here, we may need to refine this
...this.cursorOptions,
...timeout,
explain: explain ?? true
})
)
).shift(this.deserializationOptions);
}
/** Add a stage to the aggregation pipeline
* @example
* ```
* const documents = await users.aggregate().addStage({ $match: { name: /Mike/ } }).toArray();
* ```
* @example
* ```
* const documents = await users.aggregate()
* .addStage<{ name: string }>({ $project: { name: true } })
* .toArray(); // type of documents is { name: string }[]
* ```
*/
addStage(stage: Document): this;
addStage<T = Document>(stage: Document): AggregationCursor<T>;
addStage<T = Document>(stage: Document): AggregationCursor<T> {
this.throwIfInitialized();
if (
this.cursorOptions.timeoutMS != null &&
this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION &&
(stage.$out != null || stage.$merge != null)
) {
throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode');
}
this.pipeline.push(stage);
return this as unknown as AggregationCursor<T>;
}
/** Add a group stage to the aggregation pipeline */
group<T = TSchema>($group: Document): AggregationCursor<T>;
group($group: Document): this {
return this.addStage({ $group });
}
/** Add a limit stage to the aggregation pipeline */
limit($limit: number): this {
return this.addStage({ $limit });
}
/** Add a match stage to the aggregation pipeline */
match($match: Document): this {
return this.addStage({ $match });
}
/** Add an out stage to the aggregation pipeline */
out($out: { db: string; coll: string } | string): this {
return this.addStage({ $out });
}
/**
* Add a project stage to the aggregation pipeline
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* By default chaining a projection to your cursor changes the returned type to the generic {@link Document} type.
* You should specify a parameterized type to have assertions on your final results.
*
* @example
* ```typescript
* // Best way
* const docs: AggregationCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
* // Flexible way
* const docs: AggregationCursor<Document> = cursor.project({ _id: 0, a: true });
* ```
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
* it **does not** return a new instance of a cursor. This means when calling project,
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
* Take note of the following example:
*
* @example
* ```typescript
* const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
* const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
* const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();
*
* // or always use chaining and save the final cursor
*
* const cursor = coll.aggregate().project<{ a: string }>({
* _id: 0,
* a: { $convert: { input: '$a', to: 'string' }
* }});
* ```
*/
project<T extends Document = Document>($project: Document): AggregationCursor<T> {
return this.addStage<T>({ $project });
}
/** Add a lookup stage to the aggregation pipeline */
lookup($lookup: Document): this {
return this.addStage({ $lookup });
}
/** Add a redact stage to the aggregation pipeline */
redact($redact: Document): this {
return this.addStage({ $redact });
}
/** Add a skip stage to the aggregation pipeline */
skip($skip: number): this {
return this.addStage({ $skip });
}
/** Add a sort stage to the aggregation pipeline */
sort($sort: Sort): this {
return this.addStage({ $sort });
}
  /** Add an unwind stage to the aggregation pipeline */
unwind($unwind: Document | string): this {
return this.addStage({ $unwind });
}
/** Add a geoNear stage to the aggregation pipeline */
geoNear($geoNear: Document): this {
return this.addStage({ $geoNear });
}
}
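// Illustrative sketch (not part of the upstream driver): each fluent helper above pushes exactly
// one stage, so the two cursors below run the same pipeline. `users` is assumed to be a Collection.
//
//   const viaHelpers = users
//     .aggregate()
//     .match({ active: true })
//     .group({ _id: '$kind', n: { $sum: 1 } })
//     .sort({ n: -1 });
//
//   const viaPipeline = users.aggregate([
//     { $match: { active: true } },
//     { $group: { _id: '$kind', n: { $sum: 1 } } },
//     { $sort: { n: -1 } }
//   ]);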

View file

@ -0,0 +1,172 @@
import type { Document } from '../bson';
import {
ChangeStream,
type ChangeStreamDocument,
type ChangeStreamEvents,
type OperationTime,
type ResumeToken
} from '../change_stream';
import { type CursorResponse } from '../cmap/wire_protocol/responses';
import { INIT, RESPONSE } from '../constants';
import type { MongoClient } from '../mongo_client';
import { AggregateOperation } from '../operations/aggregate';
import type { CollationOptions } from '../operations/command';
import { executeOperation } from '../operations/execute_operation';
import type { ClientSession } from '../sessions';
import { maxWireVersion, type MongoDBNamespace } from '../utils';
import {
AbstractCursor,
type AbstractCursorOptions,
type InitialCursorResponse
} from './abstract_cursor';
/** @internal */
export interface ChangeStreamCursorOptions extends AbstractCursorOptions {
startAtOperationTime?: OperationTime;
resumeAfter?: ResumeToken;
startAfter?: ResumeToken;
maxAwaitTimeMS?: number;
collation?: CollationOptions;
fullDocument?: string;
}
/** @internal */
export class ChangeStreamCursor<
TSchema extends Document = Document,
TChange extends Document = ChangeStreamDocument<TSchema>
> extends AbstractCursor<TChange, ChangeStreamEvents> {
private _resumeToken: ResumeToken;
private startAtOperationTime: OperationTime | null;
private hasReceived?: boolean;
private readonly changeStreamCursorOptions: ChangeStreamCursorOptions;
private postBatchResumeToken?: ResumeToken;
private readonly pipeline: Document[];
/**
* @internal
*
* used to determine change stream resumability
*/
maxWireVersion: number | undefined;
constructor(
client: MongoClient,
namespace: MongoDBNamespace,
pipeline: Document[] = [],
options: ChangeStreamCursorOptions = {}
) {
super(client, namespace, { ...options, tailable: true, awaitData: true });
this.pipeline = pipeline;
this.changeStreamCursorOptions = options;
this._resumeToken = null;
this.startAtOperationTime = options.startAtOperationTime ?? null;
if (options.startAfter) {
this.resumeToken = options.startAfter;
} else if (options.resumeAfter) {
this.resumeToken = options.resumeAfter;
}
}
set resumeToken(token: ResumeToken) {
this._resumeToken = token;
this.emit(ChangeStream.RESUME_TOKEN_CHANGED, token);
}
get resumeToken(): ResumeToken {
return this._resumeToken;
}
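  /**
   * Summary of the resume precedence implemented below: once a resume token has been cached it is
   * sent back as `startAfter` only when the original `startAfter` option was supplied and no change
   * has been received yet, otherwise as `resumeAfter`. Without a cached token, servers at wire
   * version 7 or above fall back to `startAtOperationTime`.
   */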
get resumeOptions(): ChangeStreamCursorOptions {
const options: ChangeStreamCursorOptions = {
...this.changeStreamCursorOptions
};
for (const key of ['resumeAfter', 'startAfter', 'startAtOperationTime'] as const) {
delete options[key];
}
if (this.resumeToken != null) {
if (this.changeStreamCursorOptions.startAfter && !this.hasReceived) {
options.startAfter = this.resumeToken;
} else {
options.resumeAfter = this.resumeToken;
}
} else if (this.startAtOperationTime != null && maxWireVersion(this.server) >= 7) {
options.startAtOperationTime = this.startAtOperationTime;
}
return options;
}
cacheResumeToken(resumeToken: ResumeToken): void {
if (this.bufferedCount() === 0 && this.postBatchResumeToken) {
this.resumeToken = this.postBatchResumeToken;
} else {
this.resumeToken = resumeToken;
}
this.hasReceived = true;
}
_processBatch(response: CursorResponse): void {
const { postBatchResumeToken } = response;
if (postBatchResumeToken) {
this.postBatchResumeToken = postBatchResumeToken;
if (response.batchSize === 0) {
this.resumeToken = postBatchResumeToken;
}
}
}
clone(): AbstractCursor<TChange> {
return new ChangeStreamCursor(this.client, this.namespace, this.pipeline, {
...this.cursorOptions
});
}
async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
const aggregateOperation = new AggregateOperation(this.namespace, this.pipeline, {
...this.cursorOptions,
...this.changeStreamCursorOptions,
session
});
const response = await executeOperation(
session.client,
aggregateOperation,
this.timeoutContext
);
const server = aggregateOperation.server;
this.maxWireVersion = maxWireVersion(server);
if (
this.startAtOperationTime == null &&
this.changeStreamCursorOptions.resumeAfter == null &&
this.changeStreamCursorOptions.startAfter == null &&
this.maxWireVersion >= 7
) {
this.startAtOperationTime = response.operationTime;
}
this._processBatch(response);
this.emit(INIT, response);
this.emit(RESPONSE);
return { server, session, response };
}
override async getMore(batchSize: number): Promise<CursorResponse> {
const response = await super.getMore(batchSize);
this.maxWireVersion = maxWireVersion(this.server);
this._processBatch(response);
this.emit(ChangeStream.MORE, response);
this.emit(ChangeStream.RESPONSE);
return response;
}
}

View file

@ -0,0 +1,83 @@
import { type Document } from '../bson';
import { type ClientBulkWriteCursorResponse } from '../cmap/wire_protocol/responses';
import type { MongoClient } from '../mongo_client';
import { ClientBulkWriteOperation } from '../operations/client_bulk_write/client_bulk_write';
import { type ClientBulkWriteCommandBuilder } from '../operations/client_bulk_write/command_builder';
import { type ClientBulkWriteOptions } from '../operations/client_bulk_write/common';
import { executeOperation } from '../operations/execute_operation';
import type { ClientSession } from '../sessions';
import { mergeOptions, MongoDBNamespace } from '../utils';
import {
AbstractCursor,
type AbstractCursorOptions,
type InitialCursorResponse
} from './abstract_cursor';
/** @public */
export interface ClientBulkWriteCursorOptions
extends Omit<AbstractCursorOptions, 'maxAwaitTimeMS' | 'tailable' | 'awaitData'>,
ClientBulkWriteOptions {}
/**
* This is the cursor that handles client bulk write operations. Note this is never
* exposed directly to the user and is always immediately exhausted.
* @internal
*/
export class ClientBulkWriteCursor extends AbstractCursor {
commandBuilder: ClientBulkWriteCommandBuilder;
/** @internal */
private cursorResponse?: ClientBulkWriteCursorResponse;
/** @internal */
private clientBulkWriteOptions: ClientBulkWriteOptions;
/** @internal */
constructor(
client: MongoClient,
commandBuilder: ClientBulkWriteCommandBuilder,
options: ClientBulkWriteCursorOptions = {}
) {
super(client, new MongoDBNamespace('admin', '$cmd'), options);
this.commandBuilder = commandBuilder;
this.clientBulkWriteOptions = options;
}
/**
* We need a way to get the top level cursor response fields for
* generating the bulk write result, so we expose this here.
*/
get response(): ClientBulkWriteCursorResponse | null {
if (this.cursorResponse) return this.cursorResponse;
return null;
}
get operations(): Document[] {
return this.commandBuilder.lastOperations;
}
clone(): ClientBulkWriteCursor {
const clonedOptions = mergeOptions({}, this.clientBulkWriteOptions);
delete clonedOptions.session;
return new ClientBulkWriteCursor(this.client, this.commandBuilder, {
...clonedOptions
});
}
/** @internal */
async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
const clientBulkWriteOperation = new ClientBulkWriteOperation(this.commandBuilder, {
...this.clientBulkWriteOptions,
...this.cursorOptions,
session
});
const response = await executeOperation(
this.client,
clientBulkWriteOperation,
this.timeoutContext
);
this.cursorResponse = response;
return { server: clientBulkWriteOperation.server, session, response };
}
}

View file

@ -0,0 +1,484 @@
import { type Document } from '../bson';
import { CursorResponse } from '../cmap/wire_protocol/responses';
import { MongoAPIError, MongoInvalidArgumentError, MongoTailableCursorError } from '../error';
import {
Explain,
ExplainableCursor,
type ExplainCommandOptions,
type ExplainVerbosityLike,
validateExplainTimeoutOptions
} from '../explain';
import type { MongoClient } from '../mongo_client';
import { type Abortable } from '../mongo_types';
import type { CollationOptions } from '../operations/command';
import { CountOperation, type CountOptions } from '../operations/count';
import { executeOperation } from '../operations/execute_operation';
import { FindOperation, type FindOptions } from '../operations/find';
import type { Hint } from '../operations/operation';
import type { ClientSession } from '../sessions';
import { formatSort, type Sort, type SortDirection } from '../sort';
import { emitWarningOnce, mergeOptions, type MongoDBNamespace, squashError } from '../utils';
import { type InitialCursorResponse } from './abstract_cursor';
/** @public Flags allowed for cursor */
export const FLAGS = [
'tailable',
'oplogReplay',
'noCursorTimeout',
'awaitData',
'exhaust',
'partial'
] as const;
/** @public */
export class FindCursor<TSchema = any> extends ExplainableCursor<TSchema> {
/** @internal */
private cursorFilter: Document;
/** @internal */
private numReturned = 0;
/** @internal */
private readonly findOptions: FindOptions & Abortable;
/** @internal */
constructor(
client: MongoClient,
namespace: MongoDBNamespace,
filter: Document = {},
options: FindOptions & Abortable = {}
) {
super(client, namespace, options);
this.cursorFilter = filter;
this.findOptions = options;
if (options.sort != null) {
this.findOptions.sort = formatSort(options.sort);
}
}
clone(): FindCursor<TSchema> {
const clonedOptions = mergeOptions({}, this.findOptions);
delete clonedOptions.session;
return new FindCursor(this.client, this.namespace, this.cursorFilter, {
...clonedOptions
});
}
override map<T>(transform: (doc: TSchema) => T): FindCursor<T> {
return super.map(transform) as FindCursor<T>;
}
/** @internal */
async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
const options = {
...this.findOptions, // NOTE: order matters here, we may need to refine this
...this.cursorOptions,
session,
signal: this.signal
};
if (options.explain) {
try {
validateExplainTimeoutOptions(options, Explain.fromOptions(options));
} catch {
throw new MongoAPIError(
'timeoutMS cannot be used with explain when explain is specified in findOptions'
);
}
}
const findOperation = new FindOperation(this.namespace, this.cursorFilter, options);
const response = await executeOperation(this.client, findOperation, this.timeoutContext);
// the response is not a cursor when `explain` is enabled
this.numReturned = response.batchSize;
return { server: findOperation.server, session, response };
}
/** @internal */
override async getMore(batchSize: number): Promise<CursorResponse> {
const numReturned = this.numReturned;
if (numReturned) {
// TODO(DRIVERS-1448): Remove logic to enforce `limit` in the driver
const limit = this.findOptions.limit;
batchSize =
limit && limit > 0 && numReturned + batchSize > limit ? limit - numReturned : batchSize;
if (batchSize <= 0) {
try {
await this.close();
} catch (error) {
squashError(error);
// this is an optimization for the special case of a limit for a find command to avoid an
// extra getMore when the limit has been reached and the limit is a multiple of the batchSize.
// This is a consequence of the new query engine in 5.0 having no knowledge of the limit as it
// produces results for the find command. Once a batch is filled up, it is returned and only
// on the subsequent getMore will the query framework consider the limit, determine the cursor
// is exhausted and return a cursorId of zero.
// instead, if we determine there are no more documents to request from the server, we preemptively
// close the cursor
}
return CursorResponse.emptyGetMore;
}
}
const response = await super.getMore(batchSize);
// TODO: wrap this in some logic to prevent it from happening if we don't need this support
this.numReturned = this.numReturned + response.batchSize;
return response;
}
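  // For example: with `limit: 25` and an initial batch of 10, the first getMore requests 10 more,
  // the second is trimmed to 5 (limit - numReturned), and the following call short-circuits to
  // CursorResponse.emptyGetMore, closing the cursor without another round trip to the server.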
/**
* Get the count of documents for this cursor
* @deprecated Use `collection.estimatedDocumentCount` or `collection.countDocuments` instead
*/
async count(options?: CountOptions): Promise<number> {
emitWarningOnce(
'cursor.count is deprecated and will be removed in the next major version, please use `collection.estimatedDocumentCount` or `collection.countDocuments` instead '
);
if (typeof options === 'boolean') {
throw new MongoInvalidArgumentError('Invalid first parameter to count');
}
return await executeOperation(
this.client,
new CountOperation(this.namespace, this.cursorFilter, {
...this.findOptions, // NOTE: order matters here, we may need to refine this
...this.cursorOptions,
...options
})
);
}
/** Execute the explain for the cursor */
async explain(): Promise<Document>;
async explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise<Document>;
async explain(options: { timeoutMS?: number }): Promise<Document>;
async explain(
verbosity: ExplainVerbosityLike | ExplainCommandOptions,
options: { timeoutMS?: number }
): Promise<Document>;
async explain(
verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number },
options?: { timeoutMS?: number }
): Promise<Document> {
const { explain, timeout } = this.resolveExplainTimeoutOptions(verbosity, options);
return (
await executeOperation(
this.client,
new FindOperation(this.namespace, this.cursorFilter, {
...this.findOptions, // NOTE: order matters here, we may need to refine this
...this.cursorOptions,
...timeout,
explain: explain ?? true
})
)
).shift(this.deserializationOptions);
}
/** Set the cursor query */
filter(filter: Document): this {
this.throwIfInitialized();
this.cursorFilter = filter;
return this;
}
/**
* Set the cursor hint
*
* @param hint - If specified, then the query system will only consider plans using the hinted index.
*/
hint(hint: Hint): this {
this.throwIfInitialized();
this.findOptions.hint = hint;
return this;
}
/**
* Set the cursor min
*
* @param min - Specify a $min value to specify the inclusive lower bound for a specific index in order to constrain the results of find(). The $min specifies the lower bound for all keys of a specific index in order.
*/
min(min: Document): this {
this.throwIfInitialized();
this.findOptions.min = min;
return this;
}
/**
* Set the cursor max
*
* @param max - Specify a $max value to specify the exclusive upper bound for a specific index in order to constrain the results of find(). The $max specifies the upper bound for all keys of a specific index in order.
*/
max(max: Document): this {
this.throwIfInitialized();
this.findOptions.max = max;
return this;
}
/**
* Set the cursor returnKey.
* If set to true, modifies the cursor to only return the index field or fields for the results of the query, rather than documents.
* If set to true and the query does not use an index to perform the read operation, the returned documents will not contain any fields.
*
* @param value - the returnKey value.
*/
returnKey(value: boolean): this {
this.throwIfInitialized();
this.findOptions.returnKey = value;
return this;
}
/**
* Modifies the output of a query by adding a field $recordId to matching documents. $recordId is the internal key which uniquely identifies a document in a collection.
*
   * @param value - The $showDiskLoc option has now been deprecated and replaced with the showRecordId field. $showDiskLoc will still be accepted for OP_QUERY style find.
*/
showRecordId(value: boolean): this {
this.throwIfInitialized();
this.findOptions.showRecordId = value;
return this;
}
/**
* Add a query modifier to the cursor query
*
* @param name - The query modifier (must start with $, such as $orderby etc)
* @param value - The modifier value.
*/
addQueryModifier(name: string, value: string | boolean | number | Document): this {
this.throwIfInitialized();
if (name[0] !== '$') {
throw new MongoInvalidArgumentError(`${name} is not a valid query modifier`);
}
    // Strip off the $
const field = name.substr(1);
// NOTE: consider some TS magic for this
switch (field) {
case 'comment':
this.findOptions.comment = value as string | Document;
break;
case 'explain':
this.findOptions.explain = value as boolean;
break;
case 'hint':
this.findOptions.hint = value as string | Document;
break;
case 'max':
this.findOptions.max = value as Document;
break;
case 'maxTimeMS':
this.findOptions.maxTimeMS = value as number;
break;
case 'min':
this.findOptions.min = value as Document;
break;
case 'orderby':
this.findOptions.sort = formatSort(value as string | Document);
break;
case 'query':
this.cursorFilter = value as Document;
break;
case 'returnKey':
this.findOptions.returnKey = value as boolean;
break;
case 'showDiskLoc':
this.findOptions.showRecordId = value as boolean;
break;
default:
throw new MongoInvalidArgumentError(`Invalid query modifier: ${name}`);
}
return this;
}
/**
* Add a comment to the cursor query allowing for tracking the comment in the log.
*
* @param value - The comment attached to this query.
*/
comment(value: string): this {
this.throwIfInitialized();
this.findOptions.comment = value;
return this;
}
/**
   * Set a maxAwaitTimeMS on a tailable cursor query to customize the timeout value for the awaitData option (only supported on MongoDB 3.2 or higher, ignored otherwise)
*
* @param value - Number of milliseconds to wait before aborting the tailed query.
*/
maxAwaitTimeMS(value: number): this {
this.throwIfInitialized();
if (typeof value !== 'number') {
throw new MongoInvalidArgumentError('Argument for maxAwaitTimeMS must be a number');
}
this.findOptions.maxAwaitTimeMS = value;
return this;
}
/**
* Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
*
* @param value - Number of milliseconds to wait before aborting the query.
*/
override maxTimeMS(value: number): this {
this.throwIfInitialized();
if (typeof value !== 'number') {
throw new MongoInvalidArgumentError('Argument for maxTimeMS must be a number');
}
this.findOptions.maxTimeMS = value;
return this;
}
/**
* Add a project stage to the aggregation pipeline
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* By default chaining a projection to your cursor changes the returned type to the generic
* {@link Document} type.
* You should specify a parameterized type to have assertions on your final results.
*
* @example
* ```typescript
* // Best way
* const docs: FindCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
* // Flexible way
* const docs: FindCursor<Document> = cursor.project({ _id: 0, a: true });
* ```
*
* @remarks
*
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
* it **does not** return a new instance of a cursor. This means when calling project,
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
* Take note of the following example:
*
* @example
* ```typescript
* const cursor: FindCursor<{ a: number; b: string }> = coll.find();
* const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
* const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();
*
* // or always use chaining and save the final cursor
*
* const cursor = coll.find().project<{ a: string }>({
* _id: 0,
* a: { $convert: { input: '$a', to: 'string' }
* }});
* ```
*/
project<T extends Document = Document>(value: Document): FindCursor<T> {
this.throwIfInitialized();
this.findOptions.projection = value;
return this as unknown as FindCursor<T>;
}
/**
* Sets the sort order of the cursor query.
*
* @param sort - The key or keys set for the sort.
* @param direction - The direction of the sorting (1 or -1).
*/
sort(sort: Sort | string, direction?: SortDirection): this {
this.throwIfInitialized();
if (this.findOptions.tailable) {
throw new MongoTailableCursorError('Tailable cursor does not support sorting');
}
this.findOptions.sort = formatSort(sort, direction);
return this;
}
/**
* Allows disk use for blocking sort operations exceeding 100MB memory. (MongoDB 3.2 or higher)
*
* @remarks
* {@link https://www.mongodb.com/docs/manual/reference/command/find/#find-cmd-allowdiskuse | find command allowDiskUse documentation}
*/
allowDiskUse(allow = true): this {
this.throwIfInitialized();
if (!this.findOptions.sort) {
throw new MongoInvalidArgumentError('Option "allowDiskUse" requires a sort specification');
}
// As of 6.0 the default is true. This allows users to get back to the old behavior.
if (!allow) {
this.findOptions.allowDiskUse = false;
return this;
}
this.findOptions.allowDiskUse = true;
return this;
}
/**
* Set the collation options for the cursor.
*
* @param value - The cursor collation options (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
*/
collation(value: CollationOptions): this {
this.throwIfInitialized();
this.findOptions.collation = value;
return this;
}
/**
* Set the limit for the cursor.
*
* @param value - The limit for the cursor query.
*/
limit(value: number): this {
this.throwIfInitialized();
if (this.findOptions.tailable) {
throw new MongoTailableCursorError('Tailable cursor does not support limit');
}
if (typeof value !== 'number') {
throw new MongoInvalidArgumentError('Operation "limit" requires an integer');
}
this.findOptions.limit = value;
return this;
}
/**
* Set the skip for the cursor.
*
* @param value - The skip for the cursor query.
*/
skip(value: number): this {
this.throwIfInitialized();
if (this.findOptions.tailable) {
throw new MongoTailableCursorError('Tailable cursor does not support skip');
}
if (typeof value !== 'number') {
throw new MongoInvalidArgumentError('Operation "skip" requires an integer');
}
this.findOptions.skip = value;
return this;
}
}
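// Illustrative sketch (not part of the upstream driver): typical chained configuration of a
// FindCursor via the builder methods above. `users` is assumed to be a Collection<{ name: string; age: number }>.
//
//   const names = await users
//     .find({ age: { $gte: 18 } })
//     .sort({ age: -1 })
//     .limit(10)
//     .project<{ name: string }>({ _id: 0, name: 1 })
//     .toArray(); // { name: string }[]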

View file

@ -0,0 +1,50 @@
import type { Document } from '../bson';
import type { Db } from '../db';
import { type Abortable } from '../mongo_types';
import { executeOperation } from '../operations/execute_operation';
import {
type CollectionInfo,
ListCollectionsOperation,
type ListCollectionsOptions
} from '../operations/list_collections';
import type { ClientSession } from '../sessions';
import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor';
/** @public */
export class ListCollectionsCursor<
T extends Pick<CollectionInfo, 'name' | 'type'> | CollectionInfo =
| Pick<CollectionInfo, 'name' | 'type'>
| CollectionInfo
> extends AbstractCursor<T> {
parent: Db;
filter: Document;
options?: ListCollectionsOptions & Abortable;
constructor(db: Db, filter: Document, options?: ListCollectionsOptions & Abortable) {
super(db.client, db.s.namespace, options);
this.parent = db;
this.filter = filter;
this.options = options;
}
clone(): ListCollectionsCursor<T> {
return new ListCollectionsCursor(this.parent, this.filter, {
...this.options,
...this.cursorOptions
});
}
/** @internal */
async _initialize(session: ClientSession | undefined): Promise<InitialCursorResponse> {
const operation = new ListCollectionsOperation(this.parent, this.filter, {
...this.cursorOptions,
...this.options,
session,
signal: this.signal
});
const response = await executeOperation(this.parent.client, operation, this.timeoutContext);
return { server: operation.server, session, response };
}
}

View file

@ -0,0 +1,37 @@
import type { Collection } from '../collection';
import { executeOperation } from '../operations/execute_operation';
import { ListIndexesOperation, type ListIndexesOptions } from '../operations/indexes';
import type { ClientSession } from '../sessions';
import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor';
/** @public */
export class ListIndexesCursor extends AbstractCursor {
parent: Collection;
options?: ListIndexesOptions;
constructor(collection: Collection, options?: ListIndexesOptions) {
super(collection.client, collection.s.namespace, options);
this.parent = collection;
this.options = options;
}
clone(): ListIndexesCursor {
return new ListIndexesCursor(this.parent, {
...this.options,
...this.cursorOptions
});
}
/** @internal */
async _initialize(session: ClientSession | undefined): Promise<InitialCursorResponse> {
const operation = new ListIndexesOperation(this.parent, {
...this.cursorOptions,
...this.options,
session
});
const response = await executeOperation(this.parent.client, operation, this.timeoutContext);
return { server: operation.server, session, response };
}
}

View file

@ -0,0 +1,20 @@
import type { Collection } from '../collection';
import type { AggregateOptions } from '../operations/aggregate';
import { AggregationCursor } from './aggregation_cursor';
/** @public */
export type ListSearchIndexesOptions = Omit<AggregateOptions, 'readConcern' | 'writeConcern'>;
/** @public */
export class ListSearchIndexesCursor extends AggregationCursor<{ name: string }> {
/** @internal */
constructor(
{ fullNamespace: ns, client }: Collection,
name: string | null,
options: ListSearchIndexesOptions = {}
) {
const pipeline =
name == null ? [{ $listSearchIndexes: {} }] : [{ $listSearchIndexes: { name } }];
super(client, ns, pipeline, options);
}
}

View file

@ -0,0 +1,173 @@
import type { BSONSerializeOptions, Document } from '../bson';
import { CursorResponse } from '../cmap/wire_protocol/responses';
import type { Db } from '../db';
import { MongoAPIError } from '../error';
import { executeOperation } from '../operations/execute_operation';
import { GetMoreOperation } from '../operations/get_more';
import { RunCommandOperation } from '../operations/run_command';
import type { ReadConcernLike } from '../read_concern';
import type { ReadPreferenceLike } from '../read_preference';
import type { ClientSession } from '../sessions';
import { ns } from '../utils';
import {
AbstractCursor,
type CursorTimeoutMode,
type InitialCursorResponse
} from './abstract_cursor';
/** @public */
export type RunCursorCommandOptions = {
readPreference?: ReadPreferenceLike;
session?: ClientSession;
/**
* @experimental
* Specifies the time an operation will run until it throws a timeout error. Note that if
* `maxTimeMS` is provided in the command in addition to setting `timeoutMS` in the options, then
* the original value of `maxTimeMS` will be overwritten.
*/
timeoutMS?: number;
/**
* @public
* @experimental
   * Specifies how `timeoutMS` is applied to the cursor. Can be either `'cursorLifetime'` or `'iteration'`.
* When set to `'iteration'`, the deadline specified by `timeoutMS` applies to each call of
* `cursor.next()`.
* When set to `'cursorLifetime'`, the deadline applies to the life of the entire cursor.
*
* Depending on the type of cursor being used, this option has different default values.
* For non-tailable cursors, this value defaults to `'cursorLifetime'`
* For tailable cursors, this value defaults to `'iteration'` since tailable cursors, by
* definition can have an arbitrarily long lifetime.
*
* @example
* ```ts
* const cursor = collection.find({}, {timeoutMS: 100, timeoutMode: 'iteration'});
* for await (const doc of cursor) {
* // process doc
* // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but
* // will continue to iterate successfully otherwise, regardless of the number of batches.
* }
* ```
*
* @example
* ```ts
* const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' });
* const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms.
* ```
*/
timeoutMode?: CursorTimeoutMode;
tailable?: boolean;
awaitData?: boolean;
} & BSONSerializeOptions;
/** @public */
export class RunCommandCursor extends AbstractCursor {
public readonly command: Readonly<Record<string, any>>;
public readonly getMoreOptions: {
comment?: any;
maxAwaitTimeMS?: number;
batchSize?: number;
} = {};
/**
* Controls the `getMore.comment` field
* @param comment - any BSON value
*/
public setComment(comment: any): this {
this.getMoreOptions.comment = comment;
return this;
}
/**
   * Controls the `getMore.maxTimeMS` field. Only valid when the cursor is tailable with awaitData enabled.
* @param maxTimeMS - the number of milliseconds to wait for new data
*/
public setMaxTimeMS(maxTimeMS: number): this {
this.getMoreOptions.maxAwaitTimeMS = maxTimeMS;
return this;
}
/**
* Controls the `getMore.batchSize` field
   * @param batchSize - the number of documents to return in the `nextBatch`
*/
public setBatchSize(batchSize: number): this {
this.getMoreOptions.batchSize = batchSize;
return this;
}
/** Unsupported for RunCommandCursor */
public override clone(): never {
throw new MongoAPIError('Clone not supported, create a new cursor with db.runCursorCommand');
}
/** Unsupported for RunCommandCursor: readConcern must be configured directly on command document */
public override withReadConcern(_: ReadConcernLike): never {
throw new MongoAPIError(
'RunCommandCursor does not support readConcern it must be attached to the command being run'
);
}
/** Unsupported for RunCommandCursor: various cursor flags must be configured directly on command document */
public override addCursorFlag(_: string, __: boolean): never {
throw new MongoAPIError(
'RunCommandCursor does not support cursor flags, they must be attached to the command being run'
);
}
/**
* Unsupported for RunCommandCursor: maxTimeMS must be configured directly on command document
*/
public override maxTimeMS(_: number): never {
throw new MongoAPIError(
'maxTimeMS must be configured on the command document directly, to configure getMore.maxTimeMS use cursor.setMaxTimeMS()'
);
}
/** Unsupported for RunCommandCursor: batchSize must be configured directly on command document */
public override batchSize(_: number): never {
throw new MongoAPIError(
'batchSize must be configured on the command document directly, to configure getMore.batchSize use cursor.setBatchSize()'
);
}
/** @internal */
private db: Db;
/** @internal */
constructor(db: Db, command: Document, options: RunCursorCommandOptions = {}) {
super(db.client, ns(db.namespace), options);
this.db = db;
this.command = Object.freeze({ ...command });
}
/** @internal */
protected async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
const operation = new RunCommandOperation<CursorResponse>(this.db, this.command, {
...this.cursorOptions,
session: session,
readPreference: this.cursorOptions.readPreference,
responseType: CursorResponse
});
const response = await executeOperation(this.client, operation, this.timeoutContext);
return {
server: operation.server,
session,
response
};
}
/** @internal */
override async getMore(_batchSize: number): Promise<CursorResponse> {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const getMoreOperation = new GetMoreOperation(this.namespace, this.id!, this.server!, {
...this.cursorOptions,
session: this.session,
...this.getMoreOptions
});
return await executeOperation(this.client, getMoreOperation, this.timeoutContext);
}
}
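// Illustrative sketch (not part of the upstream driver): a RunCommandCursor is obtained through
// db.runCursorCommand(), and getMore behaviour is tuned with the setters above instead of cursor
// flags. `db` is assumed to be a Db instance and the command shown is only an example.
//
//   const cursor = db.runCursorCommand({ checkMetadataConsistency: 1 });
//   cursor.setBatchSize(100).setComment('metadata audit');
//   for await (const doc of cursor) {
//     // process each returned document
//   }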

622
parts/3/phonebookBackend/node_modules/mongodb/src/db.ts generated vendored Normal file
View file

@ -0,0 +1,622 @@
import { Admin } from './admin';
import { type BSONSerializeOptions, type Document, resolveBSONOptions } from './bson';
import { ChangeStream, type ChangeStreamDocument, type ChangeStreamOptions } from './change_stream';
import { Collection, type CollectionOptions } from './collection';
import * as CONSTANTS from './constants';
import { AggregationCursor } from './cursor/aggregation_cursor';
import { ListCollectionsCursor } from './cursor/list_collections_cursor';
import { RunCommandCursor, type RunCursorCommandOptions } from './cursor/run_command_cursor';
import { MongoInvalidArgumentError } from './error';
import type { MongoClient, PkFactory } from './mongo_client';
import type { Abortable, TODO_NODE_3286 } from './mongo_types';
import type { AggregateOptions } from './operations/aggregate';
import { CollectionsOperation } from './operations/collections';
import {
CreateCollectionOperation,
type CreateCollectionOptions
} from './operations/create_collection';
import {
DropCollectionOperation,
type DropCollectionOptions,
DropDatabaseOperation,
type DropDatabaseOptions
} from './operations/drop';
import { executeOperation } from './operations/execute_operation';
import {
CreateIndexesOperation,
type CreateIndexesOptions,
type IndexDescriptionCompact,
type IndexDescriptionInfo,
type IndexInformationOptions,
type IndexSpecification
} from './operations/indexes';
import type { CollectionInfo, ListCollectionsOptions } from './operations/list_collections';
import { ProfilingLevelOperation, type ProfilingLevelOptions } from './operations/profiling_level';
import { RemoveUserOperation, type RemoveUserOptions } from './operations/remove_user';
import { RenameOperation, type RenameOptions } from './operations/rename';
import { RunCommandOperation, type RunCommandOptions } from './operations/run_command';
import {
type ProfilingLevel,
SetProfilingLevelOperation,
type SetProfilingLevelOptions
} from './operations/set_profiling_level';
import { DbStatsOperation, type DbStatsOptions } from './operations/stats';
import { ReadConcern } from './read_concern';
import { ReadPreference, type ReadPreferenceLike } from './read_preference';
import { DEFAULT_PK_FACTORY, filterOptions, MongoDBNamespace, resolveOptions } from './utils';
import { WriteConcern, type WriteConcernOptions } from './write_concern';
// Allowed parameters
const DB_OPTIONS_ALLOW_LIST = [
'writeConcern',
'readPreference',
'readPreferenceTags',
'native_parser',
'forceServerObjectId',
'pkFactory',
'serializeFunctions',
'raw',
'authSource',
'ignoreUndefined',
'readConcern',
'retryMiliSeconds',
'numberOfRetries',
'useBigInt64',
'promoteBuffers',
'promoteLongs',
'bsonRegExp',
'enableUtf8Validation',
'promoteValues',
'compression',
'retryWrites',
'timeoutMS'
];
/** @internal */
export interface DbPrivate {
options?: DbOptions;
readPreference?: ReadPreference;
pkFactory: PkFactory;
readConcern?: ReadConcern;
bsonOptions: BSONSerializeOptions;
writeConcern?: WriteConcern;
namespace: MongoDBNamespace;
}
/** @public */
export interface DbOptions extends BSONSerializeOptions, WriteConcernOptions {
/** If the database authentication is dependent on another databaseName. */
authSource?: string;
/** Force server to assign _id values instead of driver. */
forceServerObjectId?: boolean;
/** The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST). */
readPreference?: ReadPreferenceLike;
/** A primary key factory object for generation of custom _id keys. */
pkFactory?: PkFactory;
/** Specify a read concern for the collection. (only MongoDB 3.2 or higher supported) */
readConcern?: ReadConcern;
/** Should retry failed writes */
retryWrites?: boolean;
/**
* @experimental
* Specifies the time an operation will run until it throws a timeout error
*/
timeoutMS?: number;
}
/**
* The **Db** class is a class that represents a MongoDB Database.
* @public
*
* @example
* ```ts
* import { MongoClient } from 'mongodb';
*
* interface Pet {
* name: string;
* kind: 'dog' | 'cat' | 'fish';
* }
*
* const client = new MongoClient('mongodb://localhost:27017');
* const db = client.db();
*
* // Create a collection that validates our union
* await db.createCollection<Pet>('pets', {
* validator: { $expr: { $in: ['$kind', ['dog', 'cat', 'fish']] } }
* })
* ```
*/
export class Db {
/** @internal */
s: DbPrivate;
/** @internal */
readonly client: MongoClient;
public static SYSTEM_NAMESPACE_COLLECTION = CONSTANTS.SYSTEM_NAMESPACE_COLLECTION;
public static SYSTEM_INDEX_COLLECTION = CONSTANTS.SYSTEM_INDEX_COLLECTION;
public static SYSTEM_PROFILE_COLLECTION = CONSTANTS.SYSTEM_PROFILE_COLLECTION;
public static SYSTEM_USER_COLLECTION = CONSTANTS.SYSTEM_USER_COLLECTION;
public static SYSTEM_COMMAND_COLLECTION = CONSTANTS.SYSTEM_COMMAND_COLLECTION;
public static SYSTEM_JS_COLLECTION = CONSTANTS.SYSTEM_JS_COLLECTION;
/**
* Creates a new Db instance.
*
* The database name cannot contain a dot; the server may apply additional restrictions when an operation is run.
*
* @param client - The MongoClient for the database.
* @param databaseName - The name of the database this instance represents.
* @param options - Optional settings for Db construction.
*/
constructor(client: MongoClient, databaseName: string, options?: DbOptions) {
options = options ?? {};
// Filter the options
options = filterOptions(options, DB_OPTIONS_ALLOW_LIST);
// Ensure there are no dots in database name
if (typeof databaseName === 'string' && databaseName.includes('.')) {
throw new MongoInvalidArgumentError(`Database names cannot contain the character '.'`);
}
// Internal state of the db object
this.s = {
// Options
options,
// Unpack read preference
readPreference: ReadPreference.fromOptions(options),
// Merge bson options
bsonOptions: resolveBSONOptions(options, client),
// Set up the primary key factory or fallback to ObjectId
pkFactory: options?.pkFactory ?? DEFAULT_PK_FACTORY,
// ReadConcern
readConcern: ReadConcern.fromOptions(options),
writeConcern: WriteConcern.fromOptions(options),
// Namespace
namespace: new MongoDBNamespace(databaseName)
};
this.client = client;
}
get databaseName(): string {
return this.s.namespace.db;
}
// Options
get options(): DbOptions | undefined {
return this.s.options;
}
/**
* Check if a secondary can be used (because the read preference is *not* set to primary)
*/
get secondaryOk(): boolean {
return this.s.readPreference?.preference !== 'primary' || false;
}
get readConcern(): ReadConcern | undefined {
return this.s.readConcern;
}
/**
* The current readPreference of the Db. If not explicitly defined for
* this Db, will be inherited from the parent MongoClient
*/
get readPreference(): ReadPreference {
if (this.s.readPreference == null) {
return this.client.readPreference;
}
return this.s.readPreference;
}
get bsonOptions(): BSONSerializeOptions {
return this.s.bsonOptions;
}
// get the write Concern
get writeConcern(): WriteConcern | undefined {
return this.s.writeConcern;
}
get namespace(): string {
return this.s.namespace.toString();
}
public get timeoutMS(): number | undefined {
return this.s.options?.timeoutMS;
}
/**
* Create a new collection on a server with the specified options. Use this to create capped collections.
* More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/
*
* Collection namespace validation is performed server-side.
*
* @param name - The name of the collection to create
* @param options - Optional settings for the command
*/
async createCollection<TSchema extends Document = Document>(
name: string,
options?: CreateCollectionOptions
): Promise<Collection<TSchema>> {
return await executeOperation(
this.client,
new CreateCollectionOperation(this, name, resolveOptions(this, options)) as TODO_NODE_3286
);
}
/**
* Execute a command
*
* @remarks
* This command does not inherit options from the MongoClient.
*
* The driver will ensure the following fields are attached to the command sent to the server:
* - `lsid` - sourced from an implicit session or options.session
* - `$readPreference` - defaults to primary or can be configured by options.readPreference
* - `$db` - sourced from the name of this database
*
* If the client has a serverApi setting:
* - `apiVersion`
* - `apiStrict`
* - `apiDeprecationErrors`
*
* When in a transaction:
* - `readConcern` - sourced from readConcern set on the TransactionOptions
* - `writeConcern` - sourced from writeConcern set on the TransactionOptions
*
* Attaching any of the above fields to the command will have no effect as the driver will overwrite the value.
*
* @param command - The command to run
* @param options - Optional settings for the command
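*
* @example
* A minimal sketch, assuming a connected `MongoClient` named `client`:
* ```ts
* const db = client.db('admin');
* // `ping` is a cheap round-trip command; the reply contains `ok: 1` on success.
* const pong = await db.command({ ping: 1 });
* console.log(pong.ok);
* ```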
*/
async command(command: Document, options?: RunCommandOptions & Abortable): Promise<Document> {
// Intentionally, we do not inherit options from parent for this operation.
return await executeOperation(
this.client,
new RunCommandOperation(
this,
command,
resolveOptions(undefined, {
...resolveBSONOptions(options),
timeoutMS: options?.timeoutMS ?? this.timeoutMS,
session: options?.session,
readPreference: options?.readPreference,
signal: options?.signal
})
)
);
}
/**
* Execute an aggregation framework pipeline against the database.
*
* @param pipeline - An array of aggregation stages to be executed
* @param options - Optional settings for the command
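*
* @example
* A minimal sketch, assuming a connected `MongoClient` named `client`. `$currentOp` is a
* database-level stage and must be run against the `admin` database:
* ```ts
* const admin = client.db('admin');
* // List currently active operations on the server.
* const ops = await admin.aggregate([{ $currentOp: {} }, { $match: { active: true } }]).toArray();
* console.log(ops.length);
* ```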
*/
aggregate<T extends Document = Document>(
pipeline: Document[] = [],
options?: AggregateOptions
): AggregationCursor<T> {
return new AggregationCursor(
this.client,
this.s.namespace,
pipeline,
resolveOptions(this, options)
);
}
/** Return the Admin db instance */
admin(): Admin {
return new Admin(this);
}
/**
* Returns a reference to a MongoDB Collection. If it does not exist it will be created implicitly.
*
* Collection namespace validation is performed server-side.
*
* @param name - the collection name we wish to access.
* @returns the new Collection instance
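*
* @example
* A minimal sketch, assuming a connected `MongoClient` named `client` (schema and names are illustrative):
* ```ts
* interface Pet { name: string; kind: 'dog' | 'cat' | 'fish' }
* const pets = client.db('app').collection<Pet>('pets');
* await pets.insertOne({ name: 'Fido', kind: 'dog' });
* ```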
*/
collection<TSchema extends Document = Document>(
name: string,
options: CollectionOptions = {}
): Collection<TSchema> {
if (typeof options === 'function') {
throw new MongoInvalidArgumentError('The callback form of this helper has been removed.');
}
return new Collection<TSchema>(this, name, resolveOptions(this, options));
}
/**
* Get all the db statistics.
*
* @param options - Optional settings for the command
*/
async stats(options?: DbStatsOptions): Promise<Document> {
return await executeOperation(
this.client,
new DbStatsOperation(this, resolveOptions(this, options))
);
}
/**
* List all collections of this database with optional filter
*
* @param filter - Query to filter collections by
* @param options - Optional settings for the command
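*
* @example
* A minimal sketch, assuming a `Db` instance named `db`. With `nameOnly: true` the cursor
* yields only the `name` and `type` of each collection:
* ```ts
* for await (const info of db.listCollections({}, { nameOnly: true })) {
*   console.log(info.name);
* }
* ```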
*/
listCollections(
filter: Document,
options: Exclude<ListCollectionsOptions, 'nameOnly'> & { nameOnly: true } & Abortable
): ListCollectionsCursor<Pick<CollectionInfo, 'name' | 'type'>>;
listCollections(
filter: Document,
options: Exclude<ListCollectionsOptions, 'nameOnly'> & { nameOnly: false } & Abortable
): ListCollectionsCursor<CollectionInfo>;
listCollections<
T extends Pick<CollectionInfo, 'name' | 'type'> | CollectionInfo =
| Pick<CollectionInfo, 'name' | 'type'>
| CollectionInfo
>(filter?: Document, options?: ListCollectionsOptions & Abortable): ListCollectionsCursor<T>;
listCollections<
T extends Pick<CollectionInfo, 'name' | 'type'> | CollectionInfo =
| Pick<CollectionInfo, 'name' | 'type'>
| CollectionInfo
>(
filter: Document = {},
options: ListCollectionsOptions & Abortable = {}
): ListCollectionsCursor<T> {
return new ListCollectionsCursor<T>(this, filter, resolveOptions(this, options));
}
/**
* Rename a collection.
*
* @remarks
* This operation does not inherit options from the MongoClient.
*
* @param fromCollection - Name of current collection to rename
* @param toCollection - New name of the collection
* @param options - Optional settings for the command
*/
async renameCollection<TSchema extends Document = Document>(
fromCollection: string,
toCollection: string,
options?: RenameOptions
): Promise<Collection<TSchema>> {
// Intentionally, we do not inherit options from parent for this operation.
return await executeOperation(
this.client,
new RenameOperation(
this.collection<TSchema>(fromCollection) as TODO_NODE_3286,
toCollection,
resolveOptions(undefined, {
...options,
new_collection: true,
readPreference: ReadPreference.primary
})
) as TODO_NODE_3286
);
}
/**
* Drop a collection from the database, removing it permanently. New accesses will create a new collection.
*
* @param name - Name of collection to drop
* @param options - Optional settings for the command
*/
async dropCollection(name: string, options?: DropCollectionOptions): Promise<boolean> {
return await executeOperation(
this.client,
new DropCollectionOperation(this, name, resolveOptions(this, options))
);
}
/**
* Drop a database, removing it permanently from the server.
*
* @param options - Optional settings for the command
*/
async dropDatabase(options?: DropDatabaseOptions): Promise<boolean> {
return await executeOperation(
this.client,
new DropDatabaseOperation(this, resolveOptions(this, options))
);
}
/**
* Fetch all collections for the current db.
*
* @param options - Optional settings for the command
*/
async collections(options?: ListCollectionsOptions): Promise<Collection[]> {
return await executeOperation(
this.client,
new CollectionsOperation(this, resolveOptions(this, options))
);
}
/**
* Creates an index on the db and collection.
*
* @param name - Name of the collection to create the index on.
* @param indexSpec - Specify the field to index, or an index specification
* @param options - Optional settings for the command
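*
* @example
* A minimal sketch, assuming a `Db` instance named `db` (collection and field names are illustrative):
* ```ts
* // Compound index on `kind` and `name`, both ascending; resolves to the generated index name.
* const indexName = await db.createIndex('pets', { kind: 1, name: 1 });
* console.log(indexName); // e.g. 'kind_1_name_1'
* ```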
*/
async createIndex(
name: string,
indexSpec: IndexSpecification,
options?: CreateIndexesOptions
): Promise<string> {
const indexes = await executeOperation(
this.client,
CreateIndexesOperation.fromIndexSpecification(this, name, indexSpec, options)
);
return indexes[0];
}
/**
* Remove a user from a database
*
* @param username - The username to remove
* @param options - Optional settings for the command
*/
async removeUser(username: string, options?: RemoveUserOptions): Promise<boolean> {
return await executeOperation(
this.client,
new RemoveUserOperation(this, username, resolveOptions(this, options))
);
}
/**
* Set the current profiling level of MongoDB
*
* @param level - The new profiling level (off, slow_only, all).
* @param options - Optional settings for the command
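*
* @example
* A minimal sketch, assuming a `Db` instance named `db`:
* ```ts
* // Profile only operations slower than the server's slowms threshold.
* const level = await db.setProfilingLevel('slow_only');
* console.log(level); // 'slow_only'
* ```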
*/
async setProfilingLevel(
level: ProfilingLevel,
options?: SetProfilingLevelOptions
): Promise<ProfilingLevel> {
return await executeOperation(
this.client,
new SetProfilingLevelOperation(this, level, resolveOptions(this, options))
);
}
/**
* Retrieve the current profiling level for MongoDB
*
* @param options - Optional settings for the command
*/
async profilingLevel(options?: ProfilingLevelOptions): Promise<string> {
return await executeOperation(
this.client,
new ProfilingLevelOperation(this, resolveOptions(this, options))
);
}
/**
* Retrieves this collection's index info.
*
* @param name - The name of the collection.
* @param options - Optional settings for the command
*/
indexInformation(
name: string,
options: IndexInformationOptions & { full: true }
): Promise<IndexDescriptionInfo[]>;
indexInformation(
name: string,
options: IndexInformationOptions & { full?: false }
): Promise<IndexDescriptionCompact>;
indexInformation(
name: string,
options: IndexInformationOptions
): Promise<IndexDescriptionCompact | IndexDescriptionInfo[]>;
indexInformation(name: string): Promise<IndexDescriptionCompact>;
async indexInformation(
name: string,
options?: IndexInformationOptions
): Promise<IndexDescriptionCompact | IndexDescriptionInfo[]> {
return await this.collection(name).indexInformation(resolveOptions(this, options));
}
/**
* Create a new Change Stream, watching for new changes (insertions, updates,
* replacements, deletions, and invalidations) in this database. Will ignore all
* changes to system collections.
*
* @remarks
* watch() accepts two generic arguments for distinct use cases:
* - The first is to provide the schema that may be defined for all the collections within this database
* - The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument
*
* @remarks
* When `timeoutMS` is configured for a change stream, it will have different behaviour depending
* on whether the change stream is in iterator mode or emitter mode. In both cases, a change
* stream will time out if it does not receive a change event within `timeoutMS` of the last change
* event.
*
* Note that if a change stream is consistently timing out when watching a collection, database or
* client that is being changed, then this may be due to the server timing out before it can finish
* processing the existing oplog. To address this, restart the change stream with a higher
* `timeoutMS`.
*
* If the change stream times out the initial aggregate operation to establish the change stream on
* the server, then the client will close the change stream. If the getMore calls to the server
* time out, then the change stream will be left open, but will throw a MongoOperationTimeoutError
* when in iterator mode and emit an error event that returns a MongoOperationTimeoutError in
* emitter mode.
*
* To determine whether or not the change stream is still open following a timeout, check the
* {@link ChangeStream.closed} getter.
*
* @example
* In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream.
* The next call can just be retried after this succeeds.
* ```ts
* const changeStream = collection.watch([], { timeoutMS: 100 });
* try {
* await changeStream.next();
* } catch (e) {
*   if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
*     await changeStream.next();
*   } else {
*     throw e;
*   }
* }
* ```
*
* @example
* In emitter mode, if the change stream goes `timeoutMS` without emitting a change event, it will
* emit an error event that returns a MongoOperationTimeoutError, but will not close the change
* stream unless the resume attempt fails. There is no need to re-establish change listeners as
* this will automatically continue emitting change events once the resume attempt completes.
*
* ```ts
* const changeStream = collection.watch([], { timeoutMS: 100 });
* changeStream.on('change', console.log);
* changeStream.on('error', e => {
* if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
* // do nothing
* } else {
* changeStream.close();
* }
* });
* ```
* @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
* @param options - Optional settings for the command
* @typeParam TSchema - Type of the data being detected by the change stream
* @typeParam TChange - Type of the whole change stream document emitted
*/
watch<
TSchema extends Document = Document,
TChange extends Document = ChangeStreamDocument<TSchema>
>(pipeline: Document[] = [], options: ChangeStreamOptions = {}): ChangeStream<TSchema, TChange> {
// Allow optionally not specifying a pipeline
if (!Array.isArray(pipeline)) {
options = pipeline;
pipeline = [];
}
return new ChangeStream<TSchema, TChange>(this, pipeline, resolveOptions(this, options));
}
/**
* A low level cursor API providing basic driver functionality:
* - ClientSession management
* - ReadPreference for server selection
* - Running getMores automatically when a local batch is exhausted
*
* @param command - The command that will start a cursor on the server.
* @param options - Configurations for running the command, bson options will apply to getMores
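*
* @example
* A minimal sketch, assuming a `Db` instance named `db` (collection name is illustrative).
* Any command whose reply contains a cursor (such as `find`) can drive this API:
* ```ts
* const cursor = db.runCursorCommand({ find: 'pets', filter: {}, batchSize: 2 });
* for await (const doc of cursor) {
*   console.log(doc._id);
* }
* ```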
*/
runCursorCommand(command: Document, options?: RunCursorCommandOptions): RunCommandCursor {
return new RunCommandCursor(this, command, options);
}
}

View file

@ -0,0 +1,287 @@
import { type Stream } from './cmap/connect';
import { MongoMissingDependencyError } from './error';
import type { Callback } from './utils';
function makeErrorModule(error: any) {
const props = error ? { kModuleError: error } : {};
return new Proxy(props, {
get: (_: any, key: any) => {
if (key === 'kModuleError') {
return error;
}
throw error;
},
set: () => {
throw error;
}
});
}
export type Kerberos = typeof import('kerberos') | { kModuleError: MongoMissingDependencyError };
export function getKerberos(): Kerberos {
let kerberos: Kerberos;
try {
// Ensure you always wrap an optional require in the try block NODE-3199
// eslint-disable-next-line @typescript-eslint/no-require-imports
kerberos = require('kerberos');
} catch (error) {
kerberos = makeErrorModule(
new MongoMissingDependencyError(
'Optional module `kerberos` not found. Please install it to enable kerberos authentication',
{ cause: error, dependencyName: 'kerberos' }
)
);
}
return kerberos;
}
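// Callers of these optional-dependency loaders are expected to guard on the
// `kModuleError` sentinel before using the module. A minimal, illustrative sketch:
//
//   const kerberos = getKerberos();
//   if ('kModuleError' in kerberos) throw kerberos.kModuleError;
//   // from here on the real `kerberos` module can be used safely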
export interface KerberosClient {
step(challenge: string): Promise<string>;
step(challenge: string, callback: Callback<string>): void;
wrap(challenge: string, options: { user: string }): Promise<string>;
wrap(challenge: string, options: { user: string }, callback: Callback<string>): void;
unwrap(challenge: string): Promise<string>;
unwrap(challenge: string, callback: Callback<string>): void;
}
type ZStandardLib = {
/**
* Compress using zstd.
* @param buf - Buffer to be compressed.
*/
compress(buf: Buffer, level?: number): Promise<Buffer>;
/**
* Decompress using zstd.
*/
decompress(buf: Buffer): Promise<Buffer>;
};
export type ZStandard = ZStandardLib | { kModuleError: MongoMissingDependencyError };
export function getZstdLibrary(): ZStandardLib | { kModuleError: MongoMissingDependencyError } {
let ZStandard: ZStandardLib | { kModuleError: MongoMissingDependencyError };
try {
// eslint-disable-next-line @typescript-eslint/no-require-imports
ZStandard = require('@mongodb-js/zstd');
} catch (error) {
ZStandard = makeErrorModule(
new MongoMissingDependencyError(
'Optional module `@mongodb-js/zstd` not found. Please install it to enable zstd compression',
{ cause: error, dependencyName: 'zstd' }
)
);
}
return ZStandard;
}
/**
* @public
* Copy of the AwsCredentialIdentityProvider interface from [`smithy/types`](https://socket.dev/npm/package/\@smithy/types/files/1.1.1/dist-types/identity/awsCredentialIdentity.d.ts),
* the return type of the aws-sdk's `fromNodeProviderChain().provider()`.
*/
export interface AWSCredentials {
accessKeyId: string;
secretAccessKey: string;
sessionToken?: string;
expiration?: Date;
}
type CredentialProvider = {
fromNodeProviderChain(
this: void,
options: { clientConfig: { region: string } }
): () => Promise<AWSCredentials>;
fromNodeProviderChain(this: void): () => Promise<AWSCredentials>;
};
export function getAwsCredentialProvider():
| CredentialProvider
| { kModuleError: MongoMissingDependencyError } {
try {
// Ensure you always wrap an optional require in the try block NODE-3199
// eslint-disable-next-line @typescript-eslint/no-require-imports
const credentialProvider = require('@aws-sdk/credential-providers');
return credentialProvider;
} catch (error) {
return makeErrorModule(
new MongoMissingDependencyError(
'Optional module `@aws-sdk/credential-providers` not found.' +
' Please install it to enable getting aws credentials via the official sdk.',
{ cause: error, dependencyName: '@aws-sdk/credential-providers' }
)
);
}
}
/** @internal */
export type GcpMetadata =
| typeof import('gcp-metadata')
| { kModuleError: MongoMissingDependencyError };
export function getGcpMetadata(): GcpMetadata {
try {
// Ensure you always wrap an optional require in the try block NODE-3199
// eslint-disable-next-line @typescript-eslint/no-require-imports
const credentialProvider = require('gcp-metadata');
return credentialProvider;
} catch (error) {
return makeErrorModule(
new MongoMissingDependencyError(
'Optional module `gcp-metadata` not found.' +
' Please install it to enable getting gcp credentials via the official sdk.',
{ cause: error, dependencyName: 'gcp-metadata' }
)
);
}
}
/** @internal */
export type SnappyLib = {
/**
* In order to support both we must check the return value of the function
* @param buf - Buffer to be compressed
*/
compress(buf: Buffer): Promise<Buffer>;
/**
* In order to support both we must check the return value of the function
* @param buf - Buffer to be decompressed
*/
uncompress(buf: Buffer, opt: { asBuffer: true }): Promise<Buffer>;
};
export function getSnappy(): SnappyLib | { kModuleError: MongoMissingDependencyError } {
try {
// Ensure you always wrap an optional require in the try block NODE-3199
// eslint-disable-next-line @typescript-eslint/no-require-imports
const value = require('snappy');
return value;
} catch (error) {
const kModuleError = new MongoMissingDependencyError(
'Optional module `snappy` not found. Please install it to enable snappy compression',
{ cause: error, dependencyName: 'snappy' }
);
return { kModuleError };
}
}
export type SocksLib = {
SocksClient: {
createConnection(options: {
command: 'connect';
destination: { host: string; port: number };
proxy: {
/** host and port are ignored because we pass existing_socket */
host: 'iLoveJavaScript';
port: 0;
type: 5;
userId?: string;
password?: string;
};
timeout?: number;
/** We always create our own socket, and pass it to this API for proxy negotiation */
existing_socket: Stream;
}): Promise<{ socket: Stream }>;
};
};
export function getSocks(): SocksLib | { kModuleError: MongoMissingDependencyError } {
try {
// Ensure you always wrap an optional require in the try block NODE-3199
// eslint-disable-next-line @typescript-eslint/no-require-imports
const value = require('socks');
return value;
} catch (error) {
const kModuleError = new MongoMissingDependencyError(
'Optional module `socks` not found. Please install it to enable connections over a SOCKS5 proxy',
{ cause: error, dependencyName: 'socks' }
);
return { kModuleError };
}
}
interface AWS4 {
/**
* Created these inline types to better assert future usage of this API
* @param options - options for request
* @param credentials - AWS credential details; sessionToken should be omitted entirely if it is falsy
*/
sign(
this: void,
options: {
path: '/';
body: string;
host: string;
method: 'POST';
headers: {
'Content-Type': 'application/x-www-form-urlencoded';
'Content-Length': number;
'X-MongoDB-Server-Nonce': string;
'X-MongoDB-GS2-CB-Flag': 'n';
};
service: string;
region: string;
},
credentials:
| {
accessKeyId: string;
secretAccessKey: string;
sessionToken: string;
}
| {
accessKeyId: string;
secretAccessKey: string;
}
| undefined
): {
headers: {
Authorization: string;
'X-Amz-Date': string;
};
};
}
export const aws4: AWS4 | { kModuleError: MongoMissingDependencyError } = loadAws4();
function loadAws4() {
let aws4: AWS4 | { kModuleError: MongoMissingDependencyError };
try {
// eslint-disable-next-line @typescript-eslint/no-require-imports
aws4 = require('aws4');
} catch (error) {
aws4 = makeErrorModule(
new MongoMissingDependencyError(
'Optional module `aws4` not found. Please install it to enable AWS authentication',
{ cause: error, dependencyName: 'aws4' }
)
);
}
return aws4;
}
/** A utility function to get the instance of mongodb-client-encryption, if it exists. */
export function getMongoDBClientEncryption():
| typeof import('mongodb-client-encryption')
| { kModuleError: MongoMissingDependencyError } {
let mongodbClientEncryption = null;
try {
// NOTE(NODE-3199): Ensure you always wrap an optional require literally in the try block
// Cannot be moved to helper utility function, bundlers search and replace the actual require call
// in a way that makes this line throw at bundle time, not runtime, catching here will make bundling succeed
// eslint-disable-next-line @typescript-eslint/no-require-imports
mongodbClientEncryption = require('mongodb-client-encryption');
} catch (error) {
const kModuleError = new MongoMissingDependencyError(
'Optional module `mongodb-client-encryption` not found. Please install it to use auto encryption or ClientEncryption.',
{ cause: error, dependencyName: 'mongodb-client-encryption' }
);
return { kModuleError };
}
return mongodbClientEncryption;
}

View file

@ -0,0 +1,134 @@
import { callbackify } from 'util';
import { AutoEncrypter, type AutoEncryptionOptions } from './client-side-encryption/auto_encrypter';
import { MONGO_CLIENT_EVENTS } from './constants';
import { getMongoDBClientEncryption } from './deps';
import { MongoInvalidArgumentError, MongoMissingDependencyError } from './error';
import { MongoClient, type MongoClientOptions } from './mongo_client';
import { type Callback } from './utils';
/** @internal */
export interface EncrypterOptions {
autoEncryption: AutoEncryptionOptions;
maxPoolSize?: number;
}
/** @internal */
export class Encrypter {
private internalClient: MongoClient | null;
bypassAutoEncryption: boolean;
needsConnecting: boolean;
autoEncrypter: AutoEncrypter;
constructor(client: MongoClient, uri: string, options: MongoClientOptions) {
if (typeof options.autoEncryption !== 'object') {
throw new MongoInvalidArgumentError('Option "autoEncryption" must be specified');
}
// Initialized to null; getInternalClient() may set it later, and once set it must not be overwritten.
this.internalClient = null;
this.bypassAutoEncryption = !!options.autoEncryption.bypassAutoEncryption;
this.needsConnecting = false;
if (options.maxPoolSize === 0 && options.autoEncryption.keyVaultClient == null) {
options.autoEncryption.keyVaultClient = client;
} else if (options.autoEncryption.keyVaultClient == null) {
options.autoEncryption.keyVaultClient = this.getInternalClient(client, uri, options);
}
if (this.bypassAutoEncryption) {
options.autoEncryption.metadataClient = undefined;
} else if (options.maxPoolSize === 0) {
options.autoEncryption.metadataClient = client;
} else {
options.autoEncryption.metadataClient = this.getInternalClient(client, uri, options);
}
if (options.proxyHost) {
options.autoEncryption.proxyOptions = {
proxyHost: options.proxyHost,
proxyPort: options.proxyPort,
proxyUsername: options.proxyUsername,
proxyPassword: options.proxyPassword
};
}
this.autoEncrypter = new AutoEncrypter(client, options.autoEncryption);
}
getInternalClient(client: MongoClient, uri: string, options: MongoClientOptions): MongoClient {
let internalClient = this.internalClient;
if (internalClient == null) {
const clonedOptions: MongoClientOptions = {};
for (const key of [
...Object.getOwnPropertyNames(options),
...Object.getOwnPropertySymbols(options)
] as string[]) {
if (['autoEncryption', 'minPoolSize', 'servers', 'caseTranslate', 'dbName'].includes(key))
continue;
Reflect.set(clonedOptions, key, Reflect.get(options, key));
}
clonedOptions.minPoolSize = 0;
internalClient = new MongoClient(uri, clonedOptions);
this.internalClient = internalClient;
for (const eventName of MONGO_CLIENT_EVENTS) {
for (const listener of client.listeners(eventName)) {
internalClient.on(eventName, listener);
}
}
client.on('newListener', (eventName, listener) => {
internalClient?.on(eventName, listener);
});
this.needsConnecting = true;
}
return internalClient;
}
async connectInternalClient(): Promise<void> {
const internalClient = this.internalClient;
if (this.needsConnecting && internalClient != null) {
this.needsConnecting = false;
await internalClient.connect();
}
}
closeCallback(client: MongoClient, force: boolean, callback: Callback<void>) {
callbackify(this.close.bind(this))(client, force, callback);
}
async close(client: MongoClient, force: boolean): Promise<void> {
let error;
try {
await this.autoEncrypter.teardown(force);
} catch (autoEncrypterError) {
error = autoEncrypterError;
}
const internalClient = this.internalClient;
if (internalClient != null && client !== internalClient) {
return await internalClient.close(force);
}
if (error != null) {
throw error;
}
}
static checkForMongoCrypt(): void {
const mongodbClientEncryption = getMongoDBClientEncryption();
if ('kModuleError' in mongodbClientEncryption) {
throw new MongoMissingDependencyError(
'Auto-encryption requested, but the module is not installed. ' +
'Please add `mongodb-client-encryption` as a dependency of your project',
{
cause: mongodbClientEncryption['kModuleError'],
dependencyName: 'mongodb-client-encryption'
}
);
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,173 @@
import { type Document } from './bson';
import { AbstractCursor } from './cursor/abstract_cursor';
import { MongoAPIError } from './error';
/** @public */
export const ExplainVerbosity = Object.freeze({
queryPlanner: 'queryPlanner',
queryPlannerExtended: 'queryPlannerExtended',
executionStats: 'executionStats',
allPlansExecution: 'allPlansExecution'
} as const);
/** @public */
export type ExplainVerbosity = string;
/**
* For backwards compatibility, true is interpreted as "allPlansExecution"
* and false as "queryPlanner".
* @public
*/
export type ExplainVerbosityLike = ExplainVerbosity | boolean;
/** @public */
export interface ExplainCommandOptions {
/** The explain verbosity for the command. */
verbosity: ExplainVerbosity;
/** The maxTimeMS setting for the command. */
maxTimeMS?: number;
}
/**
* @public
*
* When set, this configures an explain command. Valid values are boolean (for legacy compatibility,
* see {@link ExplainVerbosityLike}), a string containing the explain verbosity, or an object containing the verbosity and
* an optional maxTimeMS.
*
* Examples of valid usage:
*
* ```typescript
* collection.find({ name: 'john doe' }, { explain: true });
* collection.find({ name: 'john doe' }, { explain: false });
* collection.find({ name: 'john doe' }, { explain: 'queryPlanner' });
* collection.find({ name: 'john doe' }, { explain: { verbosity: 'queryPlanner' } });
* ```
*
* maxTimeMS can be configured to limit the amount of time the server
* spends executing an explain by providing an object:
*
* ```typescript
* // limits the `explain` command to no more than 2 seconds
* collection.find({ name: 'john doe' }, {
* explain: {
* verbosity: 'queryPlanner',
* maxTimeMS: 2000
* }
* });
* ```
*/
export interface ExplainOptions {
/** Specifies the verbosity mode for the explain output. */
explain?: ExplainVerbosityLike | ExplainCommandOptions;
}
/** @internal */
export class Explain {
readonly verbosity: ExplainVerbosity;
readonly maxTimeMS?: number;
private constructor(verbosity: ExplainVerbosityLike, maxTimeMS?: number) {
if (typeof verbosity === 'boolean') {
this.verbosity = verbosity
? ExplainVerbosity.allPlansExecution
: ExplainVerbosity.queryPlanner;
} else {
this.verbosity = verbosity;
}
this.maxTimeMS = maxTimeMS;
}
static fromOptions({ explain }: ExplainOptions = {}): Explain | undefined {
if (explain == null) return;
if (typeof explain === 'boolean' || typeof explain === 'string') {
return new Explain(explain);
}
const { verbosity, maxTimeMS } = explain;
return new Explain(verbosity, maxTimeMS);
}
}
export function validateExplainTimeoutOptions(options: Document, explain?: Explain) {
const { maxTimeMS, timeoutMS } = options;
if (timeoutMS != null && (maxTimeMS != null || explain?.maxTimeMS != null)) {
throw new MongoAPIError('Cannot use maxTimeMS with timeoutMS for explain commands.');
}
}
/**
* Applies an explain to a given command.
* @internal
*
* @param command - the command on which to apply the explain
* @param explain - the explain instance containing the verbosity and optional maxTimeMS
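*
* @example
* A minimal sketch (collection name is illustrative) of the resulting command shape:
* ```ts
* const explain = Explain.fromOptions({ explain: 'queryPlanner' });
* if (explain) {
*   const cmd = decorateWithExplain({ find: 'pets' }, explain);
*   // cmd is { explain: { find: 'pets' }, verbosity: 'queryPlanner' }
* }
* ```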
*/
export function decorateWithExplain(
command: Document,
explain: Explain
): {
explain: Document;
verbosity: ExplainVerbosity;
maxTimeMS?: number;
} {
type ExplainCommand = ReturnType<typeof decorateWithExplain>;
const { verbosity, maxTimeMS } = explain;
const baseCommand: ExplainCommand = { explain: command, verbosity };
if (typeof maxTimeMS === 'number') {
baseCommand.maxTimeMS = maxTimeMS;
}
return baseCommand;
}
/**
* @public
*
* A base class for any cursors that have `explain()` methods.
*/
export abstract class ExplainableCursor<TSchema> extends AbstractCursor<TSchema> {
/** Execute the explain for the cursor */
abstract explain(): Promise<Document>;
abstract explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise<Document>;
abstract explain(options: { timeoutMS?: number }): Promise<Document>;
abstract explain(
verbosity: ExplainVerbosityLike | ExplainCommandOptions,
options: { timeoutMS?: number }
): Promise<Document>;
abstract explain(
verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number },
options?: { timeoutMS?: number }
): Promise<Document>;
protected resolveExplainTimeoutOptions(
verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number },
options?: { timeoutMS?: number }
): { timeout?: { timeoutMS?: number }; explain?: ExplainVerbosityLike | ExplainCommandOptions } {
let explain: ExplainVerbosityLike | ExplainCommandOptions | undefined;
let timeout: { timeoutMS?: number } | undefined;
if (verbosity == null && options == null) {
explain = undefined;
timeout = undefined;
} else if (verbosity != null && options == null) {
explain =
typeof verbosity !== 'object'
? verbosity
: 'verbosity' in verbosity
? verbosity
: undefined;
timeout = typeof verbosity === 'object' && 'timeoutMS' in verbosity ? verbosity : undefined;
} else {
// @ts-expect-error TS isn't smart enough to determine that if both options are provided, the first is explain options
explain = verbosity;
timeout = options;
}
return { timeout, explain };
}
}

View file

@ -0,0 +1,487 @@
import { Readable } from 'stream';
import type { Document, ObjectId } from '../bson';
import type { Collection } from '../collection';
import { CursorTimeoutMode } from '../cursor/abstract_cursor';
import type { FindCursor } from '../cursor/find_cursor';
import {
MongoGridFSChunkError,
MongoGridFSStreamError,
MongoInvalidArgumentError,
MongoRuntimeError
} from '../error';
import type { FindOptions } from '../operations/find';
import type { ReadPreference } from '../read_preference';
import type { Sort } from '../sort';
import { CSOTTimeoutContext } from '../timeout';
import type { Callback } from '../utils';
import type { GridFSChunk } from './upload';
/** @public */
export interface GridFSBucketReadStreamOptions {
sort?: Sort;
skip?: number;
/**
* 0-indexed non-negative byte offset from the beginning of the file
*/
start?: number;
/**
* 0-indexed non-negative byte offset to the end of the file contents
* to be returned by the stream. `end` is non-inclusive
*/
end?: number;
/**
* @experimental
* Specifies the time an operation will run until it throws a timeout error
*/
timeoutMS?: number;
}
/** @public */
export interface GridFSBucketReadStreamOptionsWithRevision extends GridFSBucketReadStreamOptions {
/** The revision number relative to the oldest file with the given filename. 0
* gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the
* newest. */
revision?: number;
}
/** @public */
export interface GridFSFile {
_id: ObjectId;
length: number;
chunkSize: number;
filename: string;
metadata?: Document;
uploadDate: Date;
/** @deprecated Will be removed in the next major version. */
contentType?: string;
/** @deprecated Will be removed in the next major version. */
aliases?: string[];
}
/** @internal */
export interface GridFSBucketReadStreamPrivate {
/**
* The running total number of bytes read from the chunks collection.
*/
bytesRead: number;
/**
* The number of bytes to remove from the last chunk read in the file. This is non-zero
* if `end` is not equal to the length of the document and `end` is not a multiple
* of the chunkSize.
*/
bytesToTrim: number;
/**
* The number of bytes to remove from the first chunk read in the file. This is non-zero
* if `start` is not equal to 0 and `start` is not a multiple
* of the chunkSize.
*/
bytesToSkip: number;
files: Collection<GridFSFile>;
chunks: Collection<GridFSChunk>;
cursor?: FindCursor<GridFSChunk>;
/** The running total number of chunks read from the chunks collection. */
expected: number;
/**
* The filter used to search in the _files_ collection (i.e., `{ _id: <> }`)
* This is not the same filter used when reading chunks from the chunks collection.
*/
filter: Document;
/** Indicates whether or not download has started. */
init: boolean;
/** The expected number of chunks to read, calculated from start, end, chunkSize and file length. */
expectedEnd: number;
file?: GridFSFile;
options: {
sort?: Sort;
skip?: number;
start: number;
end: number;
timeoutMS?: number;
};
readPreference?: ReadPreference;
timeoutContext?: CSOTTimeoutContext;
}
/**
* A readable stream that enables you to read buffers from GridFS.
*
* Do not instantiate this class directly. Use `openDownloadStream()` instead.
* @public
*/
export class GridFSBucketReadStream extends Readable {
/** @internal */
s: GridFSBucketReadStreamPrivate;
/**
* Fires when the stream loaded the file document corresponding to the provided id.
* @event
*/
static readonly FILE = 'file' as const;
/**
* @param chunks - Handle for chunks collection
* @param files - Handle for files collection
* @param readPreference - The read preference to use
* @param filter - The filter to use to find the file document
* @internal
*/
constructor(
chunks: Collection<GridFSChunk>,
files: Collection<GridFSFile>,
readPreference: ReadPreference | undefined,
filter: Document,
options?: GridFSBucketReadStreamOptions
) {
super({ emitClose: true });
this.s = {
bytesToTrim: 0,
bytesToSkip: 0,
bytesRead: 0,
chunks,
expected: 0,
files,
filter,
init: false,
expectedEnd: 0,
options: {
start: 0,
end: 0,
...options
},
readPreference,
timeoutContext:
options?.timeoutMS != null
? new CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 })
: undefined
};
}
/**
* Reads from the cursor and pushes to the stream.
* Private Impl, do not call directly
* @internal
*/
override _read(): void {
if (this.destroyed) return;
waitForFile(this, () => doRead(this));
}
/**
* Sets the 0-based offset in bytes to start streaming from. Throws
* an error if this stream has entered flowing mode
* (e.g. if you've already called `on('data')`)
*
* @param start - 0-based offset in bytes to start streaming from
*/
start(start = 0): this {
throwIfInitialized(this);
this.s.options.start = start;
return this;
}
/**
* Sets the 0-based offset in bytes at which to stop streaming (non-inclusive). Throws
* an error if this stream has entered flowing mode
* (e.g. if you've already called `on('data')`)
*
* @param end - Offset in bytes to stop reading at
*/
end(end = 0): this {
throwIfInitialized(this);
this.s.options.end = end;
return this;
}
/**
* Marks this stream as aborted (will never push another `data` event)
* and kills the underlying cursor. Will emit the 'end' event, and then
* the 'close' event once the cursor is successfully killed.
*/
async abort(): Promise<void> {
this.push(null);
this.destroy();
const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow();
await this.s.cursor?.close({ timeoutMS: remainingTimeMS });
}
}
function throwIfInitialized(stream: GridFSBucketReadStream): void {
if (stream.s.init) {
throw new MongoGridFSStreamError('Options cannot be changed after the stream is initialized');
}
}
function doRead(stream: GridFSBucketReadStream): void {
if (stream.destroyed) return;
if (!stream.s.cursor) return;
if (!stream.s.file) return;
const handleReadResult = (doc: Document | null) => {
if (stream.destroyed) return;
if (!doc) {
stream.push(null);
stream.s.cursor?.close().then(undefined, error => stream.destroy(error));
return;
}
if (!stream.s.file) return;
const bytesRemaining = stream.s.file.length - stream.s.bytesRead;
const expectedN = stream.s.expected++;
const expectedLength = Math.min(stream.s.file.chunkSize, bytesRemaining);
if (doc.n > expectedN) {
return stream.destroy(
new MongoGridFSChunkError(
`ChunkIsMissing: Got unexpected n: ${doc.n}, expected: ${expectedN}`
)
);
}
if (doc.n < expectedN) {
return stream.destroy(
new MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected: ${expectedN}`)
);
}
let buf = Buffer.isBuffer(doc.data) ? doc.data : doc.data.buffer;
if (buf.byteLength !== expectedLength) {
if (bytesRemaining <= 0) {
return stream.destroy(
new MongoGridFSChunkError(
`ExtraChunk: Got unexpected n: ${doc.n}, expected file length ${stream.s.file.length} bytes but already read ${stream.s.bytesRead} bytes`
)
);
}
return stream.destroy(
new MongoGridFSChunkError(
`ChunkIsWrongSize: Got unexpected length: ${buf.byteLength}, expected: ${expectedLength}`
)
);
}
stream.s.bytesRead += buf.byteLength;
if (buf.byteLength === 0) {
return stream.push(null);
}
let sliceStart = null;
let sliceEnd = null;
if (stream.s.bytesToSkip != null) {
sliceStart = stream.s.bytesToSkip;
stream.s.bytesToSkip = 0;
}
const atEndOfStream = expectedN === stream.s.expectedEnd - 1;
const bytesLeftToRead = stream.s.options.end - stream.s.bytesToSkip;
if (atEndOfStream && stream.s.bytesToTrim != null) {
sliceEnd = stream.s.file.chunkSize - stream.s.bytesToTrim;
} else if (stream.s.options.end && bytesLeftToRead < doc.data.byteLength) {
sliceEnd = bytesLeftToRead;
}
if (sliceStart != null || sliceEnd != null) {
buf = buf.slice(sliceStart || 0, sliceEnd || buf.byteLength);
}
stream.push(buf);
return;
};
stream.s.cursor.next().then(handleReadResult, error => {
if (stream.destroyed) return;
stream.destroy(error);
});
}
function init(stream: GridFSBucketReadStream): void {
const findOneOptions: FindOptions = {};
if (stream.s.readPreference) {
findOneOptions.readPreference = stream.s.readPreference;
}
if (stream.s.options && stream.s.options.sort) {
findOneOptions.sort = stream.s.options.sort;
}
if (stream.s.options && stream.s.options.skip) {
findOneOptions.skip = stream.s.options.skip;
}
const handleReadResult = (doc: Document | null) => {
if (stream.destroyed) return;
if (!doc) {
const identifier = stream.s.filter._id
? stream.s.filter._id.toString()
: stream.s.filter.filename;
const errmsg = `FileNotFound: file ${identifier} was not found`;
// TODO(NODE-3483)
const err = new MongoRuntimeError(errmsg);
err.code = 'ENOENT'; // TODO: NODE-3338 set property as part of constructor
return stream.destroy(err);
}
// If document is empty, kill the stream immediately and don't
// execute any reads
if (doc.length <= 0) {
stream.push(null);
return;
}
if (stream.destroyed) {
// If user destroys the stream before we have a cursor, wait
// until the query is done to say we're 'closed' because we can't
// cancel a query.
stream.destroy();
return;
}
try {
stream.s.bytesToSkip = handleStartOption(stream, doc, stream.s.options);
} catch (error) {
return stream.destroy(error);
}
const filter: Document = { files_id: doc._id };
// Currently (MongoDB 3.4.4) the skip function does not support the index;
// it needs to retrieve all the documents first and then skip them. (CS-25811)
// As a workaround we use $gte on the "n" field.
if (stream.s.options && stream.s.options.start != null) {
const skip = Math.floor(stream.s.options.start / doc.chunkSize);
if (skip > 0) {
filter['n'] = { $gte: skip };
}
}
let remainingTimeMS: number | undefined;
try {
remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow(
`Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms`
);
} catch (error) {
return stream.destroy(error);
}
stream.s.cursor = stream.s.chunks
.find(filter, {
timeoutMode: stream.s.options.timeoutMS != null ? CursorTimeoutMode.LIFETIME : undefined,
timeoutMS: remainingTimeMS
})
.sort({ n: 1 });
if (stream.s.readPreference) {
stream.s.cursor.withReadPreference(stream.s.readPreference);
}
stream.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
stream.s.file = doc as GridFSFile;
try {
stream.s.bytesToTrim = handleEndOption(stream, doc, stream.s.cursor, stream.s.options);
} catch (error) {
return stream.destroy(error);
}
stream.emit(GridFSBucketReadStream.FILE, doc);
return;
};
let remainingTimeMS: number | undefined;
try {
remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow(
`Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms`
);
} catch (error) {
if (!stream.destroyed) stream.destroy(error);
return;
}
findOneOptions.timeoutMS = remainingTimeMS;
stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => {
if (stream.destroyed) return;
stream.destroy(error);
});
}
function waitForFile(stream: GridFSBucketReadStream, callback: Callback): void {
if (stream.s.file) {
return callback();
}
if (!stream.s.init) {
init(stream);
stream.s.init = true;
}
stream.once('file', () => {
callback();
});
}
function handleStartOption(
stream: GridFSBucketReadStream,
doc: Document,
options: GridFSBucketReadStreamOptions
): number {
if (options && options.start != null) {
if (options.start > doc.length) {
throw new MongoInvalidArgumentError(
`Stream start (${options.start}) must not be more than the length of the file (${doc.length})`
);
}
if (options.start < 0) {
throw new MongoInvalidArgumentError(`Stream start (${options.start}) must not be negative`);
}
if (options.end != null && options.end < options.start) {
throw new MongoInvalidArgumentError(
`Stream start (${options.start}) must not be greater than stream end (${options.end})`
);
}
stream.s.bytesRead = Math.floor(options.start / doc.chunkSize) * doc.chunkSize;
stream.s.expected = Math.floor(options.start / doc.chunkSize);
return options.start - stream.s.bytesRead;
}
throw new MongoInvalidArgumentError('Start option must be defined');
}
function handleEndOption(
stream: GridFSBucketReadStream,
doc: Document,
cursor: FindCursor<GridFSChunk>,
options: GridFSBucketReadStreamOptions
) {
if (options && options.end != null) {
if (options.end > doc.length) {
throw new MongoInvalidArgumentError(
`Stream end (${options.end}) must not be more than the length of the file (${doc.length})`
);
}
if (options.end < 0) {
throw new MongoInvalidArgumentError(`Stream end (${options.end}) must not be negative`);
}
const start = options.start != null ? Math.floor(options.start / doc.chunkSize) : 0;
cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);
stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);
return Math.ceil(options.end / doc.chunkSize) * doc.chunkSize - options.end;
}
throw new MongoInvalidArgumentError('End option must be defined');
}

View file

@ -0,0 +1,264 @@
import type { ObjectId } from '../bson';
import type { Collection } from '../collection';
import type { FindCursor } from '../cursor/find_cursor';
import type { Db } from '../db';
import { MongoOperationTimeoutError, MongoRuntimeError } from '../error';
import { type Filter, TypedEventEmitter } from '../mongo_types';
import type { ReadPreference } from '../read_preference';
import type { Sort } from '../sort';
import { CSOTTimeoutContext } from '../timeout';
import { noop, resolveOptions } from '../utils';
import { WriteConcern, type WriteConcernOptions } from '../write_concern';
import type { FindOptions } from './../operations/find';
import {
GridFSBucketReadStream,
type GridFSBucketReadStreamOptions,
type GridFSBucketReadStreamOptionsWithRevision,
type GridFSFile
} from './download';
import {
GridFSBucketWriteStream,
type GridFSBucketWriteStreamOptions,
type GridFSChunk
} from './upload';
const DEFAULT_GRIDFS_BUCKET_OPTIONS: {
bucketName: string;
chunkSizeBytes: number;
} = {
bucketName: 'fs',
chunkSizeBytes: 255 * 1024
};
/** @public */
export interface GridFSBucketOptions extends WriteConcernOptions {
/** The 'files' and 'chunks' collections will be prefixed with the bucket name followed by a dot. */
bucketName?: string;
/** Number of bytes stored in each chunk. Defaults to 255KB */
chunkSizeBytes?: number;
/** Read preference to be passed to read operations */
readPreference?: ReadPreference;
/**
* @experimental
* Specifies the lifetime duration of a gridFS stream. If any async operations are in progress
* when this timeout expires, the stream will throw a timeout error.
*/
timeoutMS?: number;
}
/** @internal */
export interface GridFSBucketPrivate {
db: Db;
options: {
bucketName: string;
chunkSizeBytes: number;
readPreference?: ReadPreference;
writeConcern: WriteConcern | undefined;
timeoutMS?: number;
};
_chunksCollection: Collection<GridFSChunk>;
_filesCollection: Collection<GridFSFile>;
checkedIndexes: boolean;
calledOpenUploadStream: boolean;
}
/** @public */
export type GridFSBucketEvents = {
index(): void;
};
/**
* Constructor for a streaming GridFS interface
* @public
*/
export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
/** @internal */
s: GridFSBucketPrivate;
/**
* When the first call to openUploadStream is made, the upload stream will
* check to see if it needs to create the proper indexes on the chunks and
* files collections. This event is fired either when 1) it determines that
* no index creation is necessary, or 2) it successfully creates the
* necessary indexes.
* @event
*/
static readonly INDEX = 'index' as const;
constructor(db: Db, options?: GridFSBucketOptions) {
super();
this.on('error', noop);
this.setMaxListeners(0);
const privateOptions = resolveOptions(db, {
...DEFAULT_GRIDFS_BUCKET_OPTIONS,
...options,
writeConcern: WriteConcern.fromOptions(options)
});
this.s = {
db,
options: privateOptions,
_chunksCollection: db.collection<GridFSChunk>(privateOptions.bucketName + '.chunks'),
_filesCollection: db.collection<GridFSFile>(privateOptions.bucketName + '.files'),
checkedIndexes: false,
calledOpenUploadStream: false
};
}
/**
* Returns a writable stream (GridFSBucketWriteStream) for writing
* buffers to GridFS. The stream's 'id' property contains the resulting
* file's id.
*
* @param filename - The value of the 'filename' key in the files doc
* @param options - Optional settings.
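*
* @example
* A minimal sketch, assuming a `GridFSBucket` named `bucket` and a local file (path is illustrative):
* ```ts
* import * as fs from 'fs';
* import { pipeline } from 'stream/promises';
*
* await pipeline(fs.createReadStream('./cat.jpg'), bucket.openUploadStream('cat.jpg'));
* ```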
*/
openUploadStream(
filename: string,
options?: GridFSBucketWriteStreamOptions
): GridFSBucketWriteStream {
return new GridFSBucketWriteStream(this, filename, {
timeoutMS: this.s.options.timeoutMS,
...options
});
}
/**
* Returns a writable stream (GridFSBucketWriteStream) for writing
* buffers to GridFS for a custom file id. The stream's 'id' property contains the resulting
* file's id.
*/
openUploadStreamWithId(
id: ObjectId,
filename: string,
options?: GridFSBucketWriteStreamOptions
): GridFSBucketWriteStream {
return new GridFSBucketWriteStream(this, filename, {
timeoutMS: this.s.options.timeoutMS,
...options,
id
});
}
/** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. */
openDownloadStream(
id: ObjectId,
options?: GridFSBucketReadStreamOptions
): GridFSBucketReadStream {
return new GridFSBucketReadStream(
this.s._chunksCollection,
this.s._filesCollection,
this.s.options.readPreference,
{ _id: id },
{ timeoutMS: this.s.options.timeoutMS, ...options }
);
}
/**
* Deletes a file with the given id
*
* @param id - The id of the file doc
*/
async delete(id: ObjectId, options?: { timeoutMS: number }): Promise<void> {
const { timeoutMS } = resolveOptions(this.s.db, options);
let timeoutContext: CSOTTimeoutContext | undefined = undefined;
if (timeoutMS) {
timeoutContext = new CSOTTimeoutContext({
timeoutMS,
serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS
});
}
const { deletedCount } = await this.s._filesCollection.deleteOne(
{ _id: id },
{ timeoutMS: timeoutContext?.remainingTimeMS }
);
const remainingTimeMS = timeoutContext?.remainingTimeMS;
if (remainingTimeMS != null && remainingTimeMS <= 0)
throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`);
// Delete orphaned chunks before returning FileNotFound
await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS });
if (deletedCount === 0) {
// TODO(NODE-3483): Replace with more appropriate error
// Consider creating new error MongoGridFSFileNotFoundError
throw new MongoRuntimeError(`File not found for id ${id}`);
}
}
/** Convenience wrapper around find on the files collection */
find(filter: Filter<GridFSFile> = {}, options: FindOptions = {}): FindCursor<GridFSFile> {
return this.s._filesCollection.find(filter, options);
}
/**
* Returns a readable stream (GridFSBucketReadStream) for streaming the
* file with the given name from GridFS. If there are multiple files with
* the same name, this will stream the most recent file with the given name
* (as determined by the `uploadDate` field). You can set the `revision`
* option to change this behavior.
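*
* @example
* A minimal sketch, assuming a `GridFSBucket` named `bucket` (filename is illustrative);
* `revision: -1` selects the most recently uploaded file with that name:
* ```ts
* const chunks: Buffer[] = [];
* for await (const chunk of bucket.openDownloadStreamByName('cat.jpg', { revision: -1 })) {
*   chunks.push(chunk);
* }
* const contents = Buffer.concat(chunks);
* ```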
*/
openDownloadStreamByName(
filename: string,
options?: GridFSBucketReadStreamOptionsWithRevision
): GridFSBucketReadStream {
let sort: Sort = { uploadDate: -1 };
let skip = undefined;
if (options && options.revision != null) {
if (options.revision >= 0) {
sort = { uploadDate: 1 };
skip = options.revision;
} else {
skip = -options.revision - 1;
}
}
return new GridFSBucketReadStream(
this.s._chunksCollection,
this.s._filesCollection,
this.s.options.readPreference,
{ filename },
{ timeoutMS: this.s.options.timeoutMS, ...options, sort, skip }
);
}
/**
* Renames the file with the given _id to the given string
*
* @param id - the id of the file to rename
* @param filename - new name for the file
*/
async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise<void> {
const filter = { _id: id };
const update = { $set: { filename } };
const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options);
if (matchedCount === 0) {
throw new MongoRuntimeError(`File with id ${id} not found`);
}
}
/** Removes this bucket's files collection, followed by its chunks collection. */
async drop(options?: { timeoutMS: number }): Promise<void> {
const { timeoutMS } = resolveOptions(this.s.db, options);
let timeoutContext: CSOTTimeoutContext | undefined = undefined;
if (timeoutMS) {
timeoutContext = new CSOTTimeoutContext({
timeoutMS,
serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS
});
}
if (timeoutContext) {
await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS });
const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow(
`Timed out after ${timeoutMS}ms`
);
await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS });
} else {
await this.s._filesCollection.drop();
await this.s._chunksCollection.drop();
}
}
}

View file

@ -0,0 +1,559 @@
import { Writable } from 'stream';
import { type Document, ObjectId } from '../bson';
import type { Collection } from '../collection';
import { CursorTimeoutMode } from '../cursor/abstract_cursor';
import {
MongoAPIError,
MONGODB_ERROR_CODES,
MongoError,
MongoOperationTimeoutError
} from '../error';
import { CSOTTimeoutContext } from '../timeout';
import { type Callback, resolveTimeoutOptions, squashError } from '../utils';
import type { WriteConcernOptions } from '../write_concern';
import { WriteConcern } from './../write_concern';
import type { GridFSFile } from './download';
import type { GridFSBucket } from './index';
/** @public */
export interface GridFSChunk {
_id: ObjectId;
files_id: ObjectId;
n: number;
data: Buffer | Uint8Array;
}
/** @public */
export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions {
/** Overwrite this bucket's chunkSizeBytes for this file */
chunkSizeBytes?: number;
/** Custom file id for the GridFS file. */
id?: ObjectId;
/** Object to store in the file document's `metadata` field */
metadata?: Document;
/**
* String to store in the file document's `contentType` field.
* @deprecated Will be removed in the next major version. Add a contentType field to the metadata document instead.
*/
contentType?: string;
/**
* Array of strings to store in the file document's `aliases` field.
* @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead.
*/
aliases?: string[];
/**
* @experimental
* Specifies the time an operation will run until it throws a timeout error
*/
timeoutMS?: number;
}
/**
* A writable stream that enables you to write buffers to GridFS.
*
* Do not instantiate this class directly. Use `openUploadStream()` instead.
* @public
*/
export class GridFSBucketWriteStream extends Writable {
bucket: GridFSBucket;
/** A Collection instance where the file's chunks are stored */
chunks: Collection<GridFSChunk>;
/** A Collection instance where the file's GridFSFile document is stored */
files: Collection<GridFSFile>;
/** The name of the file */
filename: string;
/** Options controlling the metadata inserted along with the file */
options: GridFSBucketWriteStreamOptions;
/** Indicates the stream is finished uploading */
done: boolean;
/** The ObjectId used for the `_id` field on the GridFSFile document */
id: ObjectId;
/** The number of bytes that each chunk will be limited to */
chunkSizeBytes: number;
/** Space used to store a chunk currently being inserted */
bufToStore: Buffer;
/** Accumulates the number of bytes inserted as the stream uploads chunks */
length: number;
/** Accumulates the number of chunks inserted as the stream uploads file contents */
n: number;
/** Tracks the current offset into the buffered bytes being uploaded */
pos: number;
/** Contains a number of properties indicating the current state of the stream */
state: {
/** If set the stream has ended */
streamEnd: boolean;
/** Indicates the number of chunks that still need to be inserted to exhaust the current buffered data */
outstandingRequests: number;
/** If set an error occurred during insertion */
errored: boolean;
/** If set the stream was intentionally aborted */
aborted: boolean;
};
/** The write concern setting to be used with every insert operation */
writeConcern?: WriteConcern;
/**
* The document containing information about the inserted file.
* This property is defined _after_ the finish event has been emitted.
* It will remain `null` if an error occurs.
*
* @example
* ```ts
* fs.createReadStream('file.txt')
* .pipe(bucket.openUploadStream('file.txt'))
* .on('finish', function () {
* console.log(this.gridFSFile)
* })
* ```
*/
gridFSFile: GridFSFile | null = null;
/** @internal */
timeoutContext?: CSOTTimeoutContext;
/**
* @param bucket - Handle for this stream's corresponding bucket
* @param filename - The value of the 'filename' key in the files doc
* @param options - Optional settings.
* @internal
*/
constructor(bucket: GridFSBucket, filename: string, options?: GridFSBucketWriteStreamOptions) {
super();
options = options ?? {};
this.bucket = bucket;
this.chunks = bucket.s._chunksCollection;
this.filename = filename;
this.files = bucket.s._filesCollection;
this.options = options;
this.writeConcern = WriteConcern.fromOptions(options) || bucket.s.options.writeConcern;
// Signals the write is all done
this.done = false;
this.id = options.id ? options.id : new ObjectId();
// properly inherit the default chunk size from the parent bucket
this.chunkSizeBytes = options.chunkSizeBytes || this.bucket.s.options.chunkSizeBytes;
this.bufToStore = Buffer.alloc(this.chunkSizeBytes);
this.length = 0;
this.n = 0;
this.pos = 0;
this.state = {
streamEnd: false,
outstandingRequests: 0,
errored: false,
aborted: false
};
if (options.timeoutMS != null)
this.timeoutContext = new CSOTTimeoutContext({
timeoutMS: options.timeoutMS,
serverSelectionTimeoutMS: resolveTimeoutOptions(this.bucket.s.db.client, {})
.serverSelectionTimeoutMS
});
}
/**
* @internal
*
* The stream is considered constructed when the indexes are done being created
*/
override _construct(callback: (error?: Error | null) => void): void {
if (!this.bucket.s.calledOpenUploadStream) {
this.bucket.s.calledOpenUploadStream = true;
checkIndexes(this).then(
() => {
this.bucket.s.checkedIndexes = true;
this.bucket.emit('index');
callback();
},
error => {
if (error instanceof MongoOperationTimeoutError) {
return handleError(this, error, callback);
}
squashError(error);
callback();
}
);
} else {
return process.nextTick(callback);
}
}
/**
* @internal
* Write a buffer to the stream.
*
* @param chunk - Buffer to write
* @param encoding - Optional encoding for the buffer
* @param callback - Function to call once the chunk has been added to the buffer, or once the entire chunk has been persisted to MongoDB if this write caused a flush.
*/
override _write(
chunk: Buffer | string,
encoding: BufferEncoding,
callback: Callback<void>
): void {
doWrite(this, chunk, encoding, callback);
}
/** @internal */
override _final(callback: (error?: Error | null) => void): void {
if (this.state.streamEnd) {
return process.nextTick(callback);
}
this.state.streamEnd = true;
writeRemnant(this, callback);
}
/**
* Places this write stream into an aborted state (all future writes fail)
* and deletes all chunks that have already been written.
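*
* @example
* A hedged sketch of cleaning up after a failed upload (the stream variable and error-handling shape are illustrative):
* ```ts
* // Assuming `bucket` is an existing GridFSBucket instance
* const uploadStream = bucket.openUploadStream('large-file.bin');
* uploadStream.once('error', async () => {
*   // Deletes any chunks already persisted for this file id
*   await uploadStream.abort();
* });
* ```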
*/
async abort(): Promise<void> {
if (this.state.streamEnd) {
// TODO(NODE-3485): Replace with MongoGridFSStreamClosed
throw new MongoAPIError('Cannot abort a stream that has already completed');
}
if (this.state.aborted) {
// TODO(NODE-3485): Replace with MongoGridFSStreamClosed
throw new MongoAPIError('Cannot call abort() on a stream twice');
}
this.state.aborted = true;
const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow(
`Upload timed out after ${this.timeoutContext?.timeoutMS}ms`
);
await this.chunks.deleteMany({ files_id: this.id }, { timeoutMS: remainingTimeMS });
}
}
function handleError(stream: GridFSBucketWriteStream, error: Error, callback: Callback): void {
if (stream.state.errored) {
process.nextTick(callback);
return;
}
stream.state.errored = true;
process.nextTick(callback, error);
}
function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk {
return {
_id: new ObjectId(),
files_id: filesId,
n,
data
};
}
async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise<void> {
const index = { files_id: 1, n: 1 };
let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
);
let indexes;
try {
indexes = await stream.chunks
.listIndexes({
timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined,
timeoutMS: remainingTimeMS
})
.toArray();
} catch (error) {
if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) {
indexes = [];
} else {
throw error;
}
}
const hasChunksIndex = !!indexes.find(index => {
const keys = Object.keys(index.key);
if (keys.length === 2 && index.key.files_id === 1 && index.key.n === 1) {
return true;
}
return false;
});
if (!hasChunksIndex) {
remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
);
await stream.chunks.createIndex(index, {
...stream.writeConcern,
background: true,
unique: true,
timeoutMS: remainingTimeMS
});
}
}
function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void {
if (stream.done) {
return process.nextTick(callback);
}
if (stream.state.streamEnd && stream.state.outstandingRequests === 0 && !stream.state.errored) {
// Set done so we do not trigger duplicate createFilesDoc
stream.done = true;
// Create a new files doc
const gridFSFile = createFilesDoc(
stream.id,
stream.length,
stream.chunkSizeBytes,
stream.filename,
stream.options.contentType,
stream.options.aliases,
stream.options.metadata
);
if (isAborted(stream, callback)) {
return;
}
const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
if (remainingTimeMS != null && remainingTimeMS <= 0) {
return handleError(
stream,
new MongoOperationTimeoutError(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
),
callback
);
}
stream.files
.insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
.then(
() => {
stream.gridFSFile = gridFSFile;
callback();
},
error => {
return handleError(stream, error, callback);
}
);
return;
}
process.nextTick(callback);
}
async function checkIndexes(stream: GridFSBucketWriteStream): Promise<void> {
let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
);
const doc = await stream.files.findOne(
{},
{
projection: { _id: 1 },
timeoutMS: remainingTimeMS
}
);
if (doc != null) {
// If at least one document exists assume the collection has the required index
return;
}
const index = { filename: 1, uploadDate: 1 };
let indexes;
remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
);
const listIndexesOptions = {
timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined,
timeoutMS: remainingTimeMS
};
try {
indexes = await stream.files.listIndexes(listIndexesOptions).toArray();
} catch (error) {
if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) {
indexes = [];
} else {
throw error;
}
}
const hasFileIndex = !!indexes.find(index => {
const keys = Object.keys(index.key);
if (keys.length === 2 && index.key.filename === 1 && index.key.uploadDate === 1) {
return true;
}
return false;
});
if (!hasFileIndex) {
remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
);
await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS });
}
await checkChunksIndex(stream);
}
function createFilesDoc(
_id: ObjectId,
length: number,
chunkSize: number,
filename: string,
contentType?: string,
aliases?: string[],
metadata?: Document
): GridFSFile {
const ret: GridFSFile = {
_id,
length,
chunkSize,
uploadDate: new Date(),
filename
};
if (contentType) {
ret.contentType = contentType;
}
if (aliases) {
ret.aliases = aliases;
}
if (metadata) {
ret.metadata = metadata;
}
return ret;
}
function doWrite(
stream: GridFSBucketWriteStream,
chunk: Buffer | string,
encoding: BufferEncoding,
callback: Callback<void>
): void {
if (isAborted(stream, callback)) {
return;
}
const inputBuf = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk, encoding);
stream.length += inputBuf.length;
// Input is small enough to fit in our buffer
if (stream.pos + inputBuf.length < stream.chunkSizeBytes) {
inputBuf.copy(stream.bufToStore, stream.pos);
stream.pos += inputBuf.length;
process.nextTick(callback);
return;
}
// Otherwise, buffer is too big for current chunk, so we need to flush
// to MongoDB.
let inputBufRemaining = inputBuf.length;
let spaceRemaining: number = stream.chunkSizeBytes - stream.pos;
let numToCopy = Math.min(spaceRemaining, inputBuf.length);
let outstandingRequests = 0;
while (inputBufRemaining > 0) {
const inputBufPos = inputBuf.length - inputBufRemaining;
inputBuf.copy(stream.bufToStore, stream.pos, inputBufPos, inputBufPos + numToCopy);
stream.pos += numToCopy;
spaceRemaining -= numToCopy;
let doc: GridFSChunk;
if (spaceRemaining === 0) {
doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore));
const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
if (remainingTimeMS != null && remainingTimeMS <= 0) {
return handleError(
stream,
new MongoOperationTimeoutError(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
),
callback
);
}
++stream.state.outstandingRequests;
++outstandingRequests;
if (isAborted(stream, callback)) {
return;
}
stream.chunks
.insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
.then(
() => {
--stream.state.outstandingRequests;
--outstandingRequests;
if (!outstandingRequests) {
checkDone(stream, callback);
}
},
error => {
return handleError(stream, error, callback);
}
);
spaceRemaining = stream.chunkSizeBytes;
stream.pos = 0;
++stream.n;
}
inputBufRemaining -= numToCopy;
numToCopy = Math.min(spaceRemaining, inputBufRemaining);
}
}
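// Illustrative note (the numbers are an example, not configuration): with the GridFS default
// chunkSizeBytes of 255 * 1024 (261120 bytes), writing a single 600 KiB buffer through doWrite()
// flushes two full chunks (n = 0 and n = 1, 522240 bytes total) and leaves roughly 90 KiB in
// bufToStore, which writeRemnant() persists as the final, smaller chunk when the stream ends.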
function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void {
// Buffer is empty, so don't bother to insert
if (stream.pos === 0) {
return checkDone(stream, callback);
}
// Create a new buffer to make sure the buffer isn't bigger than it needs
// to be.
const remnant = Buffer.alloc(stream.pos);
stream.bufToStore.copy(remnant, 0, 0, stream.pos);
const doc = createChunkDoc(stream.id, stream.n, remnant);
// If the stream was aborted, do not write remnant
if (isAborted(stream, callback)) {
return;
}
const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
if (remainingTimeMS != null && remainingTimeMS <= 0) {
return handleError(
stream,
new MongoOperationTimeoutError(
`Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
),
callback
);
}
++stream.state.outstandingRequests;
stream.chunks
.insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
.then(
() => {
--stream.state.outstandingRequests;
checkDone(stream, callback);
},
error => {
return handleError(stream, error, callback);
}
);
}
function isAborted(stream: GridFSBucketWriteStream, callback: Callback<void>): boolean {
if (stream.state.aborted) {
process.nextTick(callback, new MongoAPIError('Stream has been aborted'));
return true;
}
return false;
}

View file

@ -0,0 +1,636 @@
import { Admin } from './admin';
import { OrderedBulkOperation } from './bulk/ordered';
import { UnorderedBulkOperation } from './bulk/unordered';
import { ChangeStream } from './change_stream';
import { Collection } from './collection';
import { AbstractCursor } from './cursor/abstract_cursor';
import { AggregationCursor } from './cursor/aggregation_cursor';
import { FindCursor } from './cursor/find_cursor';
import { ListCollectionsCursor } from './cursor/list_collections_cursor';
import { ListIndexesCursor } from './cursor/list_indexes_cursor';
import type { RunCommandCursor } from './cursor/run_command_cursor';
import { Db } from './db';
import { ExplainableCursor } from './explain';
import { GridFSBucket } from './gridfs';
import { GridFSBucketReadStream } from './gridfs/download';
import { GridFSBucketWriteStream } from './gridfs/upload';
import { MongoClient } from './mongo_client';
import { CancellationToken } from './mongo_types';
import { ClientSession } from './sessions';
/** @public */
export { BSON } from './bson';
export {
Binary,
BSONRegExp,
BSONSymbol,
BSONType,
Code,
DBRef,
Decimal128,
Double,
Int32,
Long,
MaxKey,
MinKey,
ObjectId,
Timestamp,
UUID
} from './bson';
export {
type AnyBulkWriteOperation,
type BulkWriteOptions,
MongoBulkWriteError
} from './bulk/common';
export { ClientEncryption } from './client-side-encryption/client_encryption';
export { ChangeStreamCursor } from './cursor/change_stream_cursor';
export {
MongoAPIError,
MongoAWSError,
MongoAzureError,
MongoBatchReExecutionError,
MongoChangeStreamError,
MongoClientBulkWriteCursorError,
MongoClientBulkWriteError,
MongoClientBulkWriteExecutionError,
MongoCompatibilityError,
MongoCursorExhaustedError,
MongoCursorInUseError,
MongoDecompressionError,
MongoDriverError,
MongoError,
MongoExpiredSessionError,
MongoGCPError,
MongoGridFSChunkError,
MongoGridFSStreamError,
MongoInvalidArgumentError,
MongoKerberosError,
MongoMissingCredentialsError,
MongoMissingDependencyError,
MongoNetworkError,
MongoNetworkTimeoutError,
MongoNotConnectedError,
MongoOIDCError,
MongoOperationTimeoutError,
MongoParseError,
MongoRuntimeError,
MongoServerClosedError,
MongoServerError,
MongoServerSelectionError,
MongoStalePrimaryError,
MongoSystemError,
MongoTailableCursorError,
MongoTopologyClosedError,
MongoTransactionError,
MongoUnexpectedServerResponseError,
MongoWriteConcernError,
WriteConcernErrorResult
} from './error';
export { configureExplicitResourceManagement } from './resource_management';
export {
AbstractCursor,
// Actual driver classes exported
Admin,
AggregationCursor,
CancellationToken,
ChangeStream,
ClientSession,
Collection,
Db,
ExplainableCursor,
FindCursor,
GridFSBucket,
GridFSBucketReadStream,
GridFSBucketWriteStream,
ListCollectionsCursor,
ListIndexesCursor,
MongoClient,
OrderedBulkOperation,
RunCommandCursor,
UnorderedBulkOperation
};
// enums
export { BatchType } from './bulk/common';
export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypter';
export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi';
export { AuthMechanism } from './cmap/auth/providers';
export { Compressor } from './cmap/wire_protocol/compression';
export { CURSOR_FLAGS, CursorTimeoutMode } from './cursor/abstract_cursor';
export { MongoErrorLabel } from './error';
export { ExplainVerbosity } from './explain';
export { ServerApiVersion } from './mongo_client';
export { MongoLoggableComponent, SeverityLevel } from './mongo_logger';
export { ReturnDocument } from './operations/find_and_modify';
export { ProfilingLevel } from './operations/set_profiling_level';
export { ReadConcernLevel } from './read_concern';
export { ReadPreferenceMode } from './read_preference';
export { ServerType, TopologyType } from './sdam/common';
// Helper classes
export type { AWSCredentialProvider } from './cmap/auth/aws_temporary_credentials';
export type { AWSCredentials } from './deps';
export { ReadConcern } from './read_concern';
export { ReadPreference } from './read_preference';
export { WriteConcern } from './write_concern';
// events
export {
CommandFailedEvent,
CommandStartedEvent,
CommandSucceededEvent
} from './cmap/command_monitoring_events';
export {
ConnectionCheckedInEvent,
ConnectionCheckedOutEvent,
ConnectionCheckOutFailedEvent,
ConnectionCheckOutStartedEvent,
ConnectionClosedEvent,
ConnectionCreatedEvent,
ConnectionPoolClearedEvent,
ConnectionPoolClosedEvent,
ConnectionPoolCreatedEvent,
ConnectionPoolMonitoringEvent,
ConnectionPoolReadyEvent,
ConnectionReadyEvent
} from './cmap/connection_pool_events';
export {
ServerClosedEvent,
ServerDescriptionChangedEvent,
ServerHeartbeatFailedEvent,
ServerHeartbeatStartedEvent,
ServerHeartbeatSucceededEvent,
ServerOpeningEvent,
TopologyClosedEvent,
TopologyDescriptionChangedEvent,
TopologyOpeningEvent
} from './sdam/events';
export {
ServerSelectionEvent,
ServerSelectionFailedEvent,
ServerSelectionStartedEvent,
ServerSelectionSucceededEvent,
WaitingForSuitableServerEvent
} from './sdam/server_selection_events';
export { SrvPollingEvent } from './sdam/srv_polling';
// type only exports below, these are removed from emitted JS
export type { AdminPrivate } from './admin';
export type { BSONElement, BSONSerializeOptions, Document } from './bson';
export type { deserialize, serialize } from './bson';
export type {
BulkResult,
BulkWriteOperationError,
BulkWriteResult,
DeleteManyModel,
DeleteOneModel,
InsertOneModel,
ReplaceOneModel,
UpdateManyModel,
UpdateOneModel,
WriteConcernError,
WriteError
} from './bulk/common';
export type {
Batch,
BulkOperationBase,
BulkOperationPrivate,
FindOperators,
WriteConcernErrorData
} from './bulk/common';
export type {
ChangeStreamCollModDocument,
ChangeStreamCreateDocument,
ChangeStreamCreateIndexDocument,
ChangeStreamDeleteDocument,
ChangeStreamDocument,
ChangeStreamDocumentCollectionUUID,
ChangeStreamDocumentCommon,
ChangeStreamDocumentKey,
ChangeStreamDocumentOperationDescription,
ChangeStreamDropDatabaseDocument,
ChangeStreamDropDocument,
ChangeStreamDropIndexDocument,
ChangeStreamEvents,
ChangeStreamInsertDocument,
ChangeStreamInvalidateDocument,
ChangeStreamNameSpace,
ChangeStreamOptions,
ChangeStreamRefineCollectionShardKeyDocument,
ChangeStreamRenameDocument,
ChangeStreamReplaceDocument,
ChangeStreamReshardCollectionDocument,
ChangeStreamShardCollectionDocument,
ChangeStreamSplitEvent,
ChangeStreamUpdateDocument,
OperationTime,
ResumeOptions,
ResumeToken,
UpdateDescription
} from './change_stream';
export type { AutoEncrypter } from './client-side-encryption/auto_encrypter';
export type { AutoEncryptionOptions } from './client-side-encryption/auto_encrypter';
export type { AutoEncryptionExtraOptions } from './client-side-encryption/auto_encrypter';
export type {
AWSEncryptionKeyOptions,
AzureEncryptionKeyOptions,
ClientEncryptionCreateDataKeyProviderOptions,
ClientEncryptionEncryptOptions,
ClientEncryptionOptions,
ClientEncryptionRewrapManyDataKeyProviderOptions,
ClientEncryptionRewrapManyDataKeyResult,
DataKey,
GCPEncryptionKeyOptions,
KMIPEncryptionKeyOptions,
RangeOptions
} from './client-side-encryption/client_encryption';
export {
MongoCryptAzureKMSRequestError,
MongoCryptCreateDataKeyError,
MongoCryptCreateEncryptedCollectionError,
MongoCryptError,
MongoCryptInvalidArgumentError,
MongoCryptKMSRequestNetworkTimeoutError
} from './client-side-encryption/errors';
export type { MongocryptdManager } from './client-side-encryption/mongocryptd_manager';
export type {
AWSKMSProviderConfiguration,
AzureKMSProviderConfiguration,
ClientEncryptionDataKeyProvider,
CredentialProviders,
GCPKMSProviderConfiguration,
KMIPKMSProviderConfiguration,
KMSProviders,
LocalKMSProviderConfiguration
} from './client-side-encryption/providers/index';
export type {
ClientEncryptionSocketOptions,
ClientEncryptionTlsOptions,
CSFLEKMSTlsOptions,
StateMachineExecutable
} from './client-side-encryption/state_machine';
export type { AuthContext, AuthProvider } from './cmap/auth/auth_provider';
export type {
AuthMechanismProperties,
MongoCredentials,
MongoCredentialsOptions
} from './cmap/auth/mongo_credentials';
export type {
IdPInfo,
IdPServerResponse,
OIDCCallbackFunction,
OIDCCallbackParams,
OIDCResponse
} from './cmap/auth/mongodb_oidc';
export type { Workflow } from './cmap/auth/mongodb_oidc';
export type { TokenCache } from './cmap/auth/mongodb_oidc/token_cache';
export type {
MessageHeader,
OpCompressedRequest,
OpMsgOptions,
OpMsgRequest,
OpMsgResponse,
OpQueryOptions,
OpQueryRequest,
OpReply,
WriteProtocolMessageType
} from './cmap/commands';
export type { HandshakeDocument } from './cmap/connect';
export type { LEGAL_TCP_SOCKET_OPTIONS, LEGAL_TLS_SOCKET_OPTIONS, Stream } from './cmap/connect';
export type {
CommandOptions,
Connection,
ConnectionEvents,
ConnectionOptions,
ProxyOptions
} from './cmap/connection';
export type {
CloseOptions,
ConnectionPool,
ConnectionPoolEvents,
ConnectionPoolOptions,
PoolState,
WaitQueueMember,
WithConnectionCallback
} from './cmap/connection_pool';
export type { ClientMetadata, ClientMetadataOptions } from './cmap/handshake/client_metadata';
export type { ConnectionPoolMetrics } from './cmap/metrics';
export type { StreamDescription, StreamDescriptionOptions } from './cmap/stream_description';
export type { CompressorName } from './cmap/wire_protocol/compression';
export type {
JSTypeOf,
OnDemandDocument,
OnDemandDocumentDeserializeOptions
} from './cmap/wire_protocol/on_demand/document';
export type {
CursorResponse,
MongoDBResponse,
MongoDBResponseConstructor
} from './cmap/wire_protocol/responses';
export type {
CollectionOptions,
CollectionPrivate,
CountDocumentsOptions,
ModifyResult
} from './collection';
export type {
COMMAND_FAILED,
COMMAND_STARTED,
COMMAND_SUCCEEDED,
CONNECTION_CHECK_OUT_FAILED,
CONNECTION_CHECK_OUT_STARTED,
CONNECTION_CHECKED_IN,
CONNECTION_CHECKED_OUT,
CONNECTION_CLOSED,
CONNECTION_CREATED,
CONNECTION_POOL_CLEARED,
CONNECTION_POOL_CLOSED,
CONNECTION_POOL_CREATED,
CONNECTION_POOL_READY,
CONNECTION_READY,
MONGO_CLIENT_EVENTS,
SERVER_CLOSED,
SERVER_DESCRIPTION_CHANGED,
SERVER_HEARTBEAT_FAILED,
SERVER_HEARTBEAT_STARTED,
SERVER_HEARTBEAT_SUCCEEDED,
SERVER_OPENING,
SERVER_SELECTION_FAILED,
SERVER_SELECTION_STARTED,
SERVER_SELECTION_SUCCEEDED,
TOPOLOGY_CLOSED,
TOPOLOGY_DESCRIPTION_CHANGED,
TOPOLOGY_OPENING,
WAITING_FOR_SUITABLE_SERVER
} from './constants';
export type {
AbstractCursorEvents,
AbstractCursorOptions,
CursorFlag,
CursorStreamOptions
} from './cursor/abstract_cursor';
export type {
CursorTimeoutContext,
InitialCursorResponse,
InternalAbstractCursorOptions
} from './cursor/abstract_cursor';
export type { AggregationCursorOptions } from './cursor/aggregation_cursor';
export type { ChangeStreamCursorOptions } from './cursor/change_stream_cursor';
export type {
ListSearchIndexesCursor,
ListSearchIndexesOptions
} from './cursor/list_search_indexes_cursor';
export type { RunCursorCommandOptions } from './cursor/run_command_cursor';
export type { DbOptions, DbPrivate } from './db';
export type { Encrypter, EncrypterOptions } from './encrypter';
export type { AnyError, ErrorDescription, MongoNetworkErrorOptions } from './error';
export type {
Explain,
ExplainCommandOptions,
ExplainOptions,
ExplainVerbosityLike
} from './explain';
export type {
GridFSBucketReadStreamOptions,
GridFSBucketReadStreamOptionsWithRevision,
GridFSBucketReadStreamPrivate,
GridFSFile
} from './gridfs/download';
export type { GridFSBucketEvents, GridFSBucketOptions, GridFSBucketPrivate } from './gridfs/index';
export type { GridFSBucketWriteStreamOptions, GridFSChunk } from './gridfs/upload';
export type {
Auth,
DriverInfo,
MongoClientEvents,
MongoClientOptions,
MongoClientPrivate,
MongoOptions,
PkFactory,
ServerApi,
SupportedNodeConnectionOptions,
SupportedSocketOptions,
SupportedTLSConnectionOptions,
SupportedTLSSocketOptions,
WithSessionCallback
} from './mongo_client';
export { MongoClientAuthProviders } from './mongo_client_auth_providers';
export type {
Log,
LogComponentSeveritiesClientOptions,
LogConvertible,
Loggable,
LoggableCommandFailedEvent,
LoggableCommandSucceededEvent,
LoggableEvent,
LoggableServerHeartbeatFailedEvent,
LoggableServerHeartbeatStartedEvent,
LoggableServerHeartbeatSucceededEvent,
MongoDBLogWritable,
MongoLogger,
MongoLoggerEnvOptions,
MongoLoggerMongoClientOptions,
MongoLoggerOptions
} from './mongo_logger';
export type {
Abortable,
CommonEvents,
EventsDescription,
GenericListener,
TypedEventEmitter
} from './mongo_types';
export type {
AcceptedFields,
AddToSetOperators,
AlternativeType,
ArrayElement,
ArrayOperator,
BitwiseFilter,
BSONTypeAlias,
Condition,
EnhancedOmit,
Filter,
FilterOperations,
FilterOperators,
Flatten,
InferIdType,
IntegerType,
IsAny,
Join,
KeysOfAType,
KeysOfOtherType,
MatchKeysAndValues,
NestedPaths,
NestedPathsOfType,
NonObjectIdLikeDocument,
NotAcceptedFields,
NumericType,
OneOrMore,
OnlyFieldsOfType,
OptionalId,
OptionalUnlessRequiredId,
PropertyType,
PullAllOperator,
PullOperator,
PushOperator,
RegExpOrString,
RootFilterOperators,
SchemaMember,
SetFields,
StrictFilter,
StrictMatchKeysAndValues,
StrictUpdateFilter,
UpdateFilter,
WithId,
WithoutId
} from './mongo_types';
export type {
AggregateOperation,
AggregateOptions,
DB_AGGREGATE_COLLECTION
} from './operations/aggregate';
export type {
AnyClientBulkWriteModel,
ClientBulkWriteError,
ClientBulkWriteModel,
ClientBulkWriteOptions,
ClientBulkWriteResult,
ClientDeleteManyModel,
ClientDeleteOneModel,
ClientDeleteResult,
ClientInsertOneModel,
ClientInsertOneResult,
ClientReplaceOneModel,
ClientUpdateManyModel,
ClientUpdateOneModel,
ClientUpdateResult,
ClientWriteModel
} from './operations/client_bulk_write/common';
export type {
CollationOptions,
CommandOperation,
CommandOperationOptions,
OperationParent
} from './operations/command';
export type { CountOptions } from './operations/count';
export type {
ClusteredCollectionOptions,
CreateCollectionOptions,
TimeSeriesCollectionOptions
} from './operations/create_collection';
export type { DeleteOptions, DeleteResult, DeleteStatement } from './operations/delete';
export type { DistinctOptions } from './operations/distinct';
export type { DropCollectionOptions, DropDatabaseOptions } from './operations/drop';
export type { EstimatedDocumentCountOptions } from './operations/estimated_document_count';
export type { FindOptions } from './operations/find';
export type {
FindOneAndDeleteOptions,
FindOneAndReplaceOptions,
FindOneAndUpdateOptions
} from './operations/find_and_modify';
export type { IndexInformationOptions } from './operations/indexes';
export type {
CreateIndexesOptions,
DropIndexesOptions,
IndexDescription,
IndexDescriptionCompact,
IndexDescriptionInfo,
IndexDirection,
IndexSpecification,
ListIndexesOptions
} from './operations/indexes';
export type { InsertManyResult, InsertOneOptions, InsertOneResult } from './operations/insert';
export type { CollectionInfo, ListCollectionsOptions } from './operations/list_collections';
export type { ListDatabasesOptions, ListDatabasesResult } from './operations/list_databases';
export type { AbstractOperation, Hint, OperationOptions } from './operations/operation';
export type { ProfilingLevelOptions } from './operations/profiling_level';
export type { RemoveUserOptions } from './operations/remove_user';
export type { RenameOptions } from './operations/rename';
export type { RunCommandOptions } from './operations/run_command';
export type { SearchIndexDescription } from './operations/search_indexes/create';
export type { SetProfilingLevelOptions } from './operations/set_profiling_level';
export type { DbStatsOptions } from './operations/stats';
export type {
ReplaceOptions,
UpdateOptions,
UpdateResult,
UpdateStatement
} from './operations/update';
export type { ValidateCollectionOptions } from './operations/validate_collection';
export type { ReadConcernLike } from './read_concern';
export type {
HedgeOptions,
ReadPreferenceFromOptions,
ReadPreferenceLike,
ReadPreferenceLikeOptions,
ReadPreferenceOptions
} from './read_preference';
export type { AsyncDisposable } from './resource_management';
export type { ClusterTime } from './sdam/common';
export type {
Monitor,
MonitorEvents,
MonitorInterval,
MonitorIntervalOptions,
MonitorOptions,
MonitorPrivate,
RTTPinger,
RTTPingerOptions,
RTTSampler,
ServerMonitoringMode
} from './sdam/monitor';
export type {
Server,
ServerCommandOptions,
ServerEvents,
ServerOptions,
ServerPrivate
} from './sdam/server';
export type {
ServerDescription,
ServerDescriptionOptions,
TagSet,
TopologyVersion
} from './sdam/server_description';
export type { ServerSelector } from './sdam/server_selection';
export type { SrvPoller, SrvPollerEvents, SrvPollerOptions } from './sdam/srv_polling';
export type {
ConnectOptions,
SelectServerOptions,
ServerCapabilities,
ServerSelectionCallback,
ServerSelectionRequest,
Topology,
TopologyEvents,
TopologyOptions,
TopologyPrivate
} from './sdam/topology';
export type { TopologyDescription, TopologyDescriptionOptions } from './sdam/topology_description';
export type {
ClientSessionEvents,
ClientSessionOptions,
EndSessionOptions,
ServerSession,
ServerSessionId,
ServerSessionPool,
WithTransactionCallback
} from './sessions';
export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort';
export type {
CSOTTimeoutContext,
CSOTTimeoutContextOptions,
LegacyTimeoutContext,
LegacyTimeoutContextOptions,
Timeout,
TimeoutContext,
TimeoutContextOptions
} from './timeout';
export type { Transaction, TransactionOptions, TxnState } from './transactions';
export type {
BufferPool,
Callback,
EventEmitterWithState,
HostAddress,
List,
MongoDBCollectionNamespace,
MongoDBNamespace
} from './utils';
export type { W, WriteConcernOptions, WriteConcernSettings } from './write_concern';

File diff suppressed because it is too large

View file

@ -0,0 +1,95 @@
import { type AuthProvider } from './cmap/auth/auth_provider';
import { GSSAPI } from './cmap/auth/gssapi';
import { type AuthMechanismProperties } from './cmap/auth/mongo_credentials';
import { MongoDBAWS } from './cmap/auth/mongodb_aws';
import { MongoDBOIDC, OIDC_WORKFLOWS, type Workflow } from './cmap/auth/mongodb_oidc';
import { AutomatedCallbackWorkflow } from './cmap/auth/mongodb_oidc/automated_callback_workflow';
import { HumanCallbackWorkflow } from './cmap/auth/mongodb_oidc/human_callback_workflow';
import { TokenCache } from './cmap/auth/mongodb_oidc/token_cache';
import { Plain } from './cmap/auth/plain';
import { AuthMechanism } from './cmap/auth/providers';
import { ScramSHA1, ScramSHA256 } from './cmap/auth/scram';
import { X509 } from './cmap/auth/x509';
import { MongoInvalidArgumentError } from './error';
/** @internal */
const AUTH_PROVIDERS = new Map<
AuthMechanism | string,
(authMechanismProperties: AuthMechanismProperties) => AuthProvider
>([
[
AuthMechanism.MONGODB_AWS,
({ AWS_CREDENTIAL_PROVIDER }) => new MongoDBAWS(AWS_CREDENTIAL_PROVIDER)
],
[
AuthMechanism.MONGODB_CR,
() => {
throw new MongoInvalidArgumentError(
'MONGODB-CR is no longer a supported auth mechanism in MongoDB 4.0+'
);
}
],
[AuthMechanism.MONGODB_GSSAPI, () => new GSSAPI()],
[AuthMechanism.MONGODB_OIDC, properties => new MongoDBOIDC(getWorkflow(properties))],
[AuthMechanism.MONGODB_PLAIN, () => new Plain()],
[AuthMechanism.MONGODB_SCRAM_SHA1, () => new ScramSHA1()],
[AuthMechanism.MONGODB_SCRAM_SHA256, () => new ScramSHA256()],
[AuthMechanism.MONGODB_X509, () => new X509()]
]);
/**
* Create a set of providers per client
* to avoid sharing the provider's cache between different clients.
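*
* @example
* A rough sketch of the caching behavior (the empty properties object is illustrative):
* ```ts
* const providers = new MongoClientAuthProviders();
* const first = providers.getOrCreateProvider(AuthMechanism.MONGODB_SCRAM_SHA256, {});
* const second = providers.getOrCreateProvider(AuthMechanism.MONGODB_SCRAM_SHA256, {});
* console.log(first === second); // true - the provider instance is reused within this client
* ```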
* @internal
*/
export class MongoClientAuthProviders {
private existingProviders: Map<AuthMechanism | string, AuthProvider> = new Map();
/**
* Get or create an authentication provider based on the provided mechanism.
* We don't want to create all providers at once, as some providers may not be used.
* @param name - The name of the provider to get or create.
* @param credentials - The credentials.
* @returns The provider.
* @throws MongoInvalidArgumentError if the mechanism is not supported.
* @internal
*/
getOrCreateProvider(
name: AuthMechanism | string,
authMechanismProperties: AuthMechanismProperties
): AuthProvider {
const authProvider = this.existingProviders.get(name);
if (authProvider) {
return authProvider;
}
const providerFunction = AUTH_PROVIDERS.get(name);
if (!providerFunction) {
throw new MongoInvalidArgumentError(`authMechanism ${name} not supported`);
}
const provider = providerFunction(authMechanismProperties);
this.existingProviders.set(name, provider);
return provider;
}
}
/**
* Gets either a device workflow or callback workflow.
*/
function getWorkflow(authMechanismProperties: AuthMechanismProperties): Workflow {
if (authMechanismProperties.OIDC_HUMAN_CALLBACK) {
return new HumanCallbackWorkflow(new TokenCache(), authMechanismProperties.OIDC_HUMAN_CALLBACK);
} else if (authMechanismProperties.OIDC_CALLBACK) {
return new AutomatedCallbackWorkflow(new TokenCache(), authMechanismProperties.OIDC_CALLBACK);
} else {
const environment = authMechanismProperties.ENVIRONMENT;
const workflow = OIDC_WORKFLOWS.get(environment)?.();
if (!workflow) {
throw new MongoInvalidArgumentError(
`Could not load workflow for environment ${authMechanismProperties.ENVIRONMENT}`
);
}
return workflow;
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,676 @@
import { EventEmitter } from 'events';
import type {
Binary,
BSONRegExp,
BSONType,
Decimal128,
Document,
Double,
Int32,
Long,
ObjectId,
ObjectIdLike,
Timestamp
} from './bson';
import { type CommandStartedEvent } from './cmap/command_monitoring_events';
import {
type LoggableCommandFailedEvent,
type LoggableCommandSucceededEvent,
type LoggableServerHeartbeatFailedEvent,
type LoggableServerHeartbeatStartedEvent,
type LoggableServerHeartbeatSucceededEvent,
MongoLoggableComponent,
type MongoLogger
} from './mongo_logger';
import type { Sort } from './sort';
import { noop } from './utils';
/** @internal */
export type TODO_NODE_3286 = any;
/** Given an object shaped type, return the type of the _id field or default to ObjectId @public */
export type InferIdType<TSchema> = TSchema extends { _id: infer IdType }
? // user has defined a type for _id
Record<any, never> extends IdType
? never // explicitly forbid empty objects as the type of _id
: IdType
: TSchema extends { _id?: infer IdType }
? // optional _id defined - return ObjectId | IdType
unknown extends IdType
? ObjectId // infer the _id type as ObjectId if the type of _id is unknown
: IdType
: ObjectId; // user has not defined _id on schema
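// Illustrative resolutions (the inline schemas are hypothetical, for demonstration only):
//   InferIdType<{ _id: number; title: string }>  -> number
//   InferIdType<{ title: string }>               -> ObjectId (no _id declared on the schema)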
/** Add an _id field to an object shaped type @public */
export type WithId<TSchema> = EnhancedOmit<TSchema, '_id'> & { _id: InferIdType<TSchema> };
/**
* Add an optional _id field to an object shaped type
* @public
*/
export type OptionalId<TSchema> = EnhancedOmit<TSchema, '_id'> & { _id?: InferIdType<TSchema> };
/**
* Adds an optional _id field to an object shaped type, unless the _id field is required on that type.
* In the case _id is required, this type continues to require _id.
*
* @public
*
* @privateRemarks
* `ObjectId extends TSchema['_id']` is a confusing ordering at first glance. Rather than ask
* `TSchema['_id'] extends ObjectId` which translates to "Is the _id property ObjectId?"
* we instead ask "Does ObjectId look like (have the same shape as) the _id?"
*/
export type OptionalUnlessRequiredId<TSchema> = TSchema extends { _id: any }
? TSchema
: OptionalId<TSchema>;
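// Illustrative (hypothetical schemas): a declared _id stays required, otherwise it becomes optional:
//   OptionalUnlessRequiredId<{ _id: Int32; name: string }> -> { _id: Int32; name: string }
//   OptionalUnlessRequiredId<{ name: string }>             -> { name: string } & { _id?: ObjectId }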
/** TypeScript Omit (Exclude to be specific) does not work for objects with an "any" indexed type, and breaks discriminated unions @public */
export type EnhancedOmit<TRecordOrUnion, KeyUnion> = string extends keyof TRecordOrUnion
? TRecordOrUnion // TRecordOrUnion has indexed type e.g. { _id: string; [k: string]: any; } or it is "any"
: TRecordOrUnion extends any
? Pick<TRecordOrUnion, Exclude<keyof TRecordOrUnion, KeyUnion>> // discriminated unions
: never;
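// Illustrative: unlike the built-in Omit, EnhancedOmit distributes over discriminated unions
// (the types below are hypothetical):
//   type A = { kind: 'a'; _id: ObjectId; x: number };
//   type B = { kind: 'b'; _id: ObjectId; y: string };
//   type C = EnhancedOmit<A | B, '_id'>; // { kind: 'a'; x: number } | { kind: 'b'; y: string }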
/** Remove the _id field from an object shaped type @public */
export type WithoutId<TSchema> = Omit<TSchema, '_id'>;
/** A MongoDB filter can be some portion of the schema or a set of operators @public */
export type Filter<TSchema> = {
[P in keyof WithId<TSchema>]?: Condition<WithId<TSchema>[P]>;
} & RootFilterOperators<WithId<TSchema>>;
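// A hedged usage sketch (the User schema and values are illustrative):
//   interface User { name: string; age: number; tags: string[] }
//   const filter: Filter<User> = { age: { $gte: 18 }, tags: 'admin', $or: [{ name: /^a/i }, { name: 'root' }] };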
/** @public */
export type Condition<T> = AlternativeType<T> | FilterOperators<AlternativeType<T>>;
/**
* It is possible to search using alternative types in mongodb e.g.
* string types can be searched using a regex in mongo
* array types can be searched using their element type
* @public
*/
export type AlternativeType<T> =
T extends ReadonlyArray<infer U> ? T | RegExpOrString<U> : RegExpOrString<T>;
/** @public */
export type RegExpOrString<T> = T extends string ? BSONRegExp | RegExp | T : T;
/** @public */
export interface RootFilterOperators<TSchema> extends Document {
$and?: Filter<TSchema>[];
$nor?: Filter<TSchema>[];
$or?: Filter<TSchema>[];
$text?: {
$search: string;
$language?: string;
$caseSensitive?: boolean;
$diacriticSensitive?: boolean;
};
$where?: string | ((this: TSchema) => boolean);
$comment?: string | Document;
}
/**
* @public
* A type that extends Document but forbids anything that "looks like" an object id.
*/
export type NonObjectIdLikeDocument = {
[key in keyof ObjectIdLike]?: never;
} & Document;
/** @public */
export interface FilterOperators<TValue> extends NonObjectIdLikeDocument {
// Comparison
$eq?: TValue;
$gt?: TValue;
$gte?: TValue;
$in?: ReadonlyArray<TValue>;
$lt?: TValue;
$lte?: TValue;
$ne?: TValue;
$nin?: ReadonlyArray<TValue>;
// Logical
$not?: TValue extends string ? FilterOperators<TValue> | RegExp : FilterOperators<TValue>;
// Element
/**
* When `true`, `$exists` matches the documents that contain the field,
* including documents where the field value is null.
*/
$exists?: boolean;
$type?: BSONType | BSONTypeAlias;
// Evaluation
$expr?: Record<string, any>;
$jsonSchema?: Record<string, any>;
$mod?: TValue extends number ? [number, number] : never;
$regex?: TValue extends string ? RegExp | BSONRegExp | string : never;
$options?: TValue extends string ? string : never;
// Geospatial
$geoIntersects?: { $geometry: Document };
$geoWithin?: Document;
$near?: Document;
$nearSphere?: Document;
$maxDistance?: number;
// Array
$all?: ReadonlyArray<any>;
$elemMatch?: Document;
$size?: TValue extends ReadonlyArray<any> ? number : never;
// Bitwise
$bitsAllClear?: BitwiseFilter;
$bitsAllSet?: BitwiseFilter;
$bitsAnyClear?: BitwiseFilter;
$bitsAnySet?: BitwiseFilter;
$rand?: Record<string, never>;
}
/** @public */
export type BitwiseFilter =
| number /** numeric bit mask */
| Binary /** BinData bit mask */
| ReadonlyArray<number>; /** `[ <position1>, <position2>, ... ]` */
/** @public */
export type BSONTypeAlias = keyof typeof BSONType;
/** @public */
export type IsAny<Type, ResultIfAny, ResultIfNotAny> = true extends false & Type
? ResultIfAny
: ResultIfNotAny;
/** @public */
export type Flatten<Type> = Type extends ReadonlyArray<infer Item> ? Item : Type;
/** @public */
export type ArrayElement<Type> = Type extends ReadonlyArray<infer Item> ? Item : never;
/** @public */
export type SchemaMember<T, V> = { [P in keyof T]?: V } | { [key: string]: V };
/** @public */
export type IntegerType = number | Int32 | Long | bigint;
/** @public */
export type NumericType = IntegerType | Decimal128 | Double;
/** @public */
export type FilterOperations<T> =
T extends Record<string, any>
? { [key in keyof T]?: FilterOperators<T[key]> }
: FilterOperators<T>;
/** @public */
export type KeysOfAType<TSchema, Type> = {
[key in keyof TSchema]: NonNullable<TSchema[key]> extends Type ? key : never;
}[keyof TSchema];
/** @public */
export type KeysOfOtherType<TSchema, Type> = {
[key in keyof TSchema]: NonNullable<TSchema[key]> extends Type ? never : key;
}[keyof TSchema];
/** @public */
export type AcceptedFields<TSchema, FieldType, AssignableType> = {
readonly [key in KeysOfAType<TSchema, FieldType>]?: AssignableType;
};
/** Prevents using fields with unacceptable types @public */
export type NotAcceptedFields<TSchema, FieldType> = {
readonly [key in KeysOfOtherType<TSchema, FieldType>]?: never;
};
/** @public */
export type OnlyFieldsOfType<TSchema, FieldType = any, AssignableType = FieldType> = IsAny<
TSchema[keyof TSchema],
AssignableType extends FieldType ? Record<string, FieldType> : Record<string, AssignableType>,
AcceptedFields<TSchema, FieldType, AssignableType> &
NotAcceptedFields<TSchema, FieldType> &
Record<string, AssignableType>
>;
/** @public */
export type MatchKeysAndValues<TSchema> = Readonly<Partial<TSchema>> & Record<string, any>;
/** @public */
export type AddToSetOperators<Type> = {
$each?: Array<Flatten<Type>>;
};
/** @public */
export type ArrayOperator<Type> = {
$each?: Array<Flatten<Type>>;
$slice?: number;
$position?: number;
$sort?: Sort;
};
/** @public */
export type SetFields<TSchema> = ({
readonly [key in KeysOfAType<TSchema, ReadonlyArray<any> | undefined>]?:
| OptionalId<Flatten<TSchema[key]>>
| AddToSetOperators<Array<OptionalId<Flatten<TSchema[key]>>>>;
} & IsAny<
TSchema[keyof TSchema],
object,
NotAcceptedFields<TSchema, ReadonlyArray<any> | undefined>
>) & {
readonly [key: string]: AddToSetOperators<any> | any;
};
/** @public */
export type PushOperator<TSchema> = ({
readonly [key in KeysOfAType<TSchema, ReadonlyArray<any>>]?:
| Flatten<TSchema[key]>
| ArrayOperator<Array<Flatten<TSchema[key]>>>;
} & NotAcceptedFields<TSchema, ReadonlyArray<any>>) & {
readonly [key: string]: ArrayOperator<any> | any;
};
/** @public */
export type PullOperator<TSchema> = ({
readonly [key in KeysOfAType<TSchema, ReadonlyArray<any>>]?:
| Partial<Flatten<TSchema[key]>>
| FilterOperations<Flatten<TSchema[key]>>;
} & NotAcceptedFields<TSchema, ReadonlyArray<any>>) & {
readonly [key: string]: FilterOperators<any> | any;
};
/** @public */
export type PullAllOperator<TSchema> = ({
readonly [key in KeysOfAType<TSchema, ReadonlyArray<any>>]?: TSchema[key];
} & NotAcceptedFields<TSchema, ReadonlyArray<any>>) & {
readonly [key: string]: ReadonlyArray<any>;
};
/** @public */
export type UpdateFilter<TSchema> = {
$currentDate?: OnlyFieldsOfType<
TSchema,
Date | Timestamp,
true | { $type: 'date' | 'timestamp' }
>;
$inc?: OnlyFieldsOfType<TSchema, NumericType | undefined>;
$min?: MatchKeysAndValues<TSchema>;
$max?: MatchKeysAndValues<TSchema>;
$mul?: OnlyFieldsOfType<TSchema, NumericType | undefined>;
$rename?: Record<string, string>;
$set?: MatchKeysAndValues<TSchema>;
$setOnInsert?: MatchKeysAndValues<TSchema>;
$unset?: OnlyFieldsOfType<TSchema, any, '' | true | 1>;
$addToSet?: SetFields<TSchema>;
$pop?: OnlyFieldsOfType<TSchema, ReadonlyArray<any>, 1 | -1>;
$pull?: PullOperator<TSchema>;
$push?: PushOperator<TSchema>;
$pullAll?: PullAllOperator<TSchema>;
$bit?: OnlyFieldsOfType<
TSchema,
NumericType | undefined,
{ and: IntegerType } | { or: IntegerType } | { xor: IntegerType }
>;
} & Document;
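// A hedged usage sketch against a hypothetical schema:
//   interface User { name: string; age: number; tags: string[] }
//   const update: UpdateFilter<User> = { $set: { name: 'Ada' }, $inc: { age: 1 }, $push: { tags: 'vip' } };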
/** @public */
export type Nullable<AnyType> = AnyType | null | undefined;
/** @public */
export type OneOrMore<T> = T | ReadonlyArray<T>;
/** @public */
export type GenericListener = (...args: any[]) => void;
/**
* Event description type
* @public
*/
export type EventsDescription = Record<string, GenericListener>;
/** @public */
export type CommonEvents = 'newListener' | 'removeListener';
/**
* Typescript type safe event emitter
* @public
*/
export declare interface TypedEventEmitter<Events extends EventsDescription> extends EventEmitter {
addListener<EventKey extends keyof Events>(event: EventKey, listener: Events[EventKey]): this;
addListener(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
addListener(event: string | symbol, listener: GenericListener): this;
on<EventKey extends keyof Events>(event: EventKey, listener: Events[EventKey]): this;
on(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
on(event: string | symbol, listener: GenericListener): this;
once<EventKey extends keyof Events>(event: EventKey, listener: Events[EventKey]): this;
once(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
once(event: string | symbol, listener: GenericListener): this;
removeListener<EventKey extends keyof Events>(event: EventKey, listener: Events[EventKey]): this;
removeListener(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
removeListener(event: string | symbol, listener: GenericListener): this;
off<EventKey extends keyof Events>(event: EventKey, listener: Events[EventKey]): this;
off(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
off(event: string | symbol, listener: GenericListener): this;
removeAllListeners<EventKey extends keyof Events>(
event?: EventKey | CommonEvents | symbol | string
): this;
listeners<EventKey extends keyof Events>(
event: EventKey | CommonEvents | symbol | string
): Events[EventKey][];
rawListeners<EventKey extends keyof Events>(
event: EventKey | CommonEvents | symbol | string
): Events[EventKey][];
emit<EventKey extends keyof Events>(
event: EventKey | symbol,
...args: Parameters<Events[EventKey]>
): boolean;
listenerCount<EventKey extends keyof Events>(
type: EventKey | CommonEvents | symbol | string
): number;
prependListener<EventKey extends keyof Events>(event: EventKey, listener: Events[EventKey]): this;
prependListener(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
prependListener(event: string | symbol, listener: GenericListener): this;
prependOnceListener<EventKey extends keyof Events>(
event: EventKey,
listener: Events[EventKey]
): this;
prependOnceListener(
event: CommonEvents,
listener: (eventName: string | symbol, listener: GenericListener) => void
): this;
prependOnceListener(event: string | symbol, listener: GenericListener): this;
eventNames(): string[];
getMaxListeners(): number;
setMaxListeners(n: number): this;
}
/**
* Typescript type safe event emitter
* @public
*/
// eslint-disable-next-line @typescript-eslint/no-unsafe-declaration-merging
export class TypedEventEmitter<Events extends EventsDescription> extends EventEmitter {
/** @internal */
protected mongoLogger?: MongoLogger;
/** @internal */
protected component?: MongoLoggableComponent;
/** @internal */
emitAndLog<EventKey extends keyof Events>(
event: EventKey | symbol,
...args: Parameters<Events[EventKey]>
): void {
this.emit(event, ...args);
if (this.component) this.mongoLogger?.debug(this.component, args[0]);
}
/** @internal */
emitAndLogHeartbeat<EventKey extends keyof Events>(
event: EventKey | symbol,
topologyId: number,
serverConnectionId?: number | '<monitor>',
...args: Parameters<Events[EventKey]>
): void {
this.emit(event, ...args);
if (this.component) {
const loggableHeartbeatEvent:
| LoggableServerHeartbeatFailedEvent
| LoggableServerHeartbeatSucceededEvent
| LoggableServerHeartbeatStartedEvent = {
topologyId: topologyId,
serverConnectionId: serverConnectionId ?? null,
...args[0]
};
this.mongoLogger?.debug(this.component, loggableHeartbeatEvent);
}
}
/** @internal */
emitAndLogCommand<EventKey extends keyof Events>(
monitorCommands: boolean,
event: EventKey | symbol,
databaseName: string,
connectionEstablished: boolean,
...args: Parameters<Events[EventKey]>
): void {
if (monitorCommands) {
this.emit(event, ...args);
}
if (connectionEstablished) {
const loggableCommandEvent:
| CommandStartedEvent
| LoggableCommandFailedEvent
| LoggableCommandSucceededEvent = {
databaseName: databaseName,
...args[0]
};
this.mongoLogger?.debug(MongoLoggableComponent.COMMAND, loggableCommandEvent);
}
}
}
/** @public */
export class CancellationToken extends TypedEventEmitter<{ cancel(): void }> {
constructor(...args: any[]) {
super(...args);
this.on('error', noop);
}
}
/** @public */
export type Abortable = {
/**
* @experimental
* When provided, the corresponding `AbortController` can be used to abort an asynchronous action.
*
* The `signal.reason` value is used as the error thrown.
*
* @remarks
* **NOTE:** If an abort signal aborts an operation while the driver is writing to the underlying
* socket or reading the response from the server, the socket will be closed.
* If signals are aborted at a high rate during socket read/writes this can lead to a high rate of connection reestablishment.
*
* We plan to mitigate this in a future release, please follow NODE-6062 (`timeoutMS` expiration suffers the same limitation).
*
* AbortSignals are likely a best fit for human interactive interruption (ex. ctrl-C) where the frequency
* of cancellation is reasonably low. If a signal is programmatically aborted for 100s of operations you can empty
* the driver's connection pool.
*
* @example
* ```js
* const controller = new AbortController();
* const { signal } = controller;
* process.on('SIGINT', () => controller.abort(new Error('^C pressed')));
*
* try {
* const res = await fetch('...', { signal });
* await collection.findOne(await res.json(), { signal });
* } catch (error) {
* if (error === signal.reason) {
* // signal abort error handling
* }
* }
* ```
*/
signal?: AbortSignal | undefined;
};
/**
* Helper types for dot-notation filter attributes
*/
/** @public */
export type Join<T extends unknown[], D extends string> = T extends []
? ''
: T extends [string | number]
? `${T[0]}`
: T extends [string | number, ...infer R]
? `${T[0]}${D}${Join<R, D>}`
: string;
/** @public */
export type PropertyType<Type, Property extends string> = string extends Property
? unknown
: Property extends keyof Type
? Type[Property]
: Property extends `${number}`
? Type extends ReadonlyArray<infer ArrayType>
? ArrayType
: unknown
: Property extends `${infer Key}.${infer Rest}`
? Key extends `${number}`
? Type extends ReadonlyArray<infer ArrayType>
? PropertyType<ArrayType, Rest>
: unknown
: Key extends keyof Type
? Type[Key] extends Map<string, infer MapType>
? MapType
: PropertyType<Type[Key], Rest>
: unknown
: unknown;
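// Illustrative lookups (the schema is hypothetical):
//   PropertyType<{ a: { b: number[] } }, 'a.b'>   -> number[]
//   PropertyType<{ a: { b: number[] } }, 'a.b.0'> -> number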
/**
* @public
* returns a tuple of strings (keys to be joined on '.') that represent every path into a schema
* https://www.mongodb.com/docs/manual/tutorial/query-embedded-documents/
*
* @remarks
* Through testing we determined that a depth of 8 is safe for the typescript compiler
* and provides reasonable compilation times. This number is otherwise not special and
* should be changed if issues are found with this level of checking. Beyond this
* depth any helpers that make use of NestedPaths should devolve to not asserting any
* type safety on the input.
*/
export type NestedPaths<Type, Depth extends number[]> = Depth['length'] extends 8
? []
: Type extends
| string
| number
| bigint
| boolean
| Date
| RegExp
| Buffer
| Uint8Array
| ((...args: any[]) => any)
| { _bsontype: string }
? []
: Type extends ReadonlyArray<infer ArrayType>
? [] | [number, ...NestedPaths<ArrayType, [...Depth, 1]>]
: Type extends Map<string, any>
? [string]
: Type extends object
? {
[Key in Extract<keyof Type, string>]: Type[Key] extends Type // type of value extends the parent
? [Key]
: // for a recursive union type, the child will never extend the parent type.
// but the parent will still extend the child
Type extends Type[Key]
? [Key]
: Type[Key] extends ReadonlyArray<infer ArrayType> // handling recursive types with arrays
? Type extends ArrayType // is the type of the parent the same as the type of the array?
? [Key] // yes, it's a recursive array type
: // for unions, the child type extends the parent
ArrayType extends Type
? [Key] // we have a recursive array union
: // child is an array, but it's not a recursive array
[Key, ...NestedPaths<Type[Key], [...Depth, 1]>]
: // child is not structured the same as the parent
[Key, ...NestedPaths<Type[Key], [...Depth, 1]>] | [Key];
}[Extract<keyof Type, string>]
: [];
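// Illustrative paths for a hypothetical schema:
//   interface Order { customer: { name: string }; items: { sku: string }[] }
//   NestedPaths<Order, []> includes ['customer'], ['customer', 'name'], ['items'] and
//   ['items', number, 'sku'], which Join<..., '.'> turns into 'customer', 'customer.name', etc.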
/**
* @public
* returns keys (strings) for every path into a schema whose value is of the given Type
* https://www.mongodb.com/docs/manual/tutorial/query-embedded-documents/
*/
export type NestedPathsOfType<TSchema, Type> = KeysOfAType<
{
[Property in Join<NestedPaths<TSchema, []>, '.'>]: PropertyType<TSchema, Property>;
},
Type
>;
/**
* @public
* @experimental
*/
export type StrictFilter<TSchema> =
| Partial<TSchema>
| ({
[Property in Join<NestedPaths<WithId<TSchema>, []>, '.'>]?: Condition<
PropertyType<WithId<TSchema>, Property>
>;
} & RootFilterOperators<WithId<TSchema>>);
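// A hedged sketch: StrictFilter constrains dotted paths to properties that exist on the schema
// (the schema below is hypothetical):
//   interface User { profile: { age: number } }
//   const f: StrictFilter<User> = { 'profile.age': { $gte: 18 } };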
/**
* @public
* @experimental
*/
export type StrictUpdateFilter<TSchema> = {
$currentDate?: OnlyFieldsOfType<
TSchema,
Date | Timestamp,
true | { $type: 'date' | 'timestamp' }
>;
$inc?: OnlyFieldsOfType<TSchema, NumericType | undefined>;
$min?: StrictMatchKeysAndValues<TSchema>;
$max?: StrictMatchKeysAndValues<TSchema>;
$mul?: OnlyFieldsOfType<TSchema, NumericType | undefined>;
$rename?: Record<string, string>;
$set?: StrictMatchKeysAndValues<TSchema>;
$setOnInsert?: StrictMatchKeysAndValues<TSchema>;
$unset?: OnlyFieldsOfType<TSchema, any, '' | true | 1>;
$addToSet?: SetFields<TSchema>;
$pop?: OnlyFieldsOfType<TSchema, ReadonlyArray<any>, 1 | -1>;
$pull?: PullOperator<TSchema>;
$push?: PushOperator<TSchema>;
$pullAll?: PullAllOperator<TSchema>;
$bit?: OnlyFieldsOfType<
TSchema,
NumericType | undefined,
{ and: IntegerType } | { or: IntegerType } | { xor: IntegerType }
>;
} & Document;
/**
* @public
* @experimental
*/
export type StrictMatchKeysAndValues<TSchema> = Readonly<
{
[Property in Join<NestedPaths<TSchema, []>, '.'>]?: PropertyType<TSchema, Property>;
} & {
[Property in `${NestedPathsOfType<TSchema, any[]>}.$${`[${string}]` | ''}`]?: ArrayElement<
PropertyType<TSchema, Property extends `${infer Key}.$${string}` ? Key : never>
>;
} & {
[Property in `${NestedPathsOfType<TSchema, Record<string, any>[]>}.$${
| `[${string}]`
| ''}.${string}`]?: any; // Could be further narrowed
} & Document
>;

View file

@ -0,0 +1,171 @@
import type { Document } from '../bson';
import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
import { type CursorTimeoutMode } from '../cursor/abstract_cursor';
import { MongoInvalidArgumentError } from '../error';
import { type ExplainOptions } from '../explain';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { maxWireVersion, type MongoDBNamespace } from '../utils';
import { WriteConcern } from '../write_concern';
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects, type Hint } from './operation';
/** @internal */
// eslint-disable-next-line @typescript-eslint/no-unnecessary-type-assertion
export const DB_AGGREGATE_COLLECTION = 1 as const;
const MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT = 8;
/** @public */
export interface AggregateOptions extends Omit<CommandOperationOptions, 'explain'> {
/** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires mongodb 2.6 \>). */
allowDiskUse?: boolean;
/** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
batchSize?: number;
/** Allow driver to bypass schema validation. */
bypassDocumentValidation?: boolean;
/** Return the query as a cursor; on 2.6 \> it returns a real cursor, while on pre-2.6 it returns an emulated cursor. */
cursor?: Document;
/**
* Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.
*/
maxTimeMS?: number;
/** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. */
maxAwaitTimeMS?: number;
/** Specify collation. */
collation?: CollationOptions;
/** Add an index selection hint to an aggregation command */
hint?: Hint;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
out?: string;
/**
* Specifies the verbosity mode for the explain output.
* @deprecated This API is deprecated in favor of `collection.aggregate().explain()`
* or `db.aggregate().explain()`.
*/
explain?: ExplainOptions['explain'];
/** @internal */
timeoutMode?: CursorTimeoutMode;
}
/** @internal */
export class AggregateOperation extends CommandOperation<CursorResponse> {
override options: AggregateOptions;
target: string | typeof DB_AGGREGATE_COLLECTION;
pipeline: Document[];
hasWriteStage: boolean;
constructor(ns: MongoDBNamespace, pipeline: Document[], options?: AggregateOptions) {
super(undefined, { ...options, dbName: ns.db });
this.options = { ...options };
// When ns.collection is null, undefined or the empty string, fall back to DB_AGGREGATE_COLLECTION
this.target = ns.collection || DB_AGGREGATE_COLLECTION;
this.pipeline = pipeline;
// determine if we have a write stage, override read preference if so
this.hasWriteStage = false;
if (typeof options?.out === 'string') {
this.pipeline = this.pipeline.concat({ $out: options.out });
this.hasWriteStage = true;
} else if (pipeline.length > 0) {
const finalStage = pipeline[pipeline.length - 1];
if (finalStage.$out || finalStage.$merge) {
this.hasWriteStage = true;
}
}
if (this.hasWriteStage) {
this.trySecondaryWrite = true;
} else {
delete this.options.writeConcern;
}
if (this.explain && this.writeConcern) {
throw new MongoInvalidArgumentError(
'Option "explain" cannot be used on an aggregate call with writeConcern'
);
}
if (options?.cursor != null && typeof options.cursor !== 'object') {
throw new MongoInvalidArgumentError('Cursor options must be an object');
}
}
override get commandName() {
return 'aggregate' as const;
}
override get canRetryRead(): boolean {
return !this.hasWriteStage;
}
addToPipeline(stage: Document): void {
this.pipeline.push(stage);
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<CursorResponse> {
const options: AggregateOptions = this.options;
const serverWireVersion = maxWireVersion(server);
const command: Document = { aggregate: this.target, pipeline: this.pipeline };
if (this.hasWriteStage && serverWireVersion < MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT) {
this.readConcern = undefined;
}
if (this.hasWriteStage && this.writeConcern) {
WriteConcern.apply(command, this.writeConcern);
}
if (options.bypassDocumentValidation === true) {
command.bypassDocumentValidation = options.bypassDocumentValidation;
}
if (typeof options.allowDiskUse === 'boolean') {
command.allowDiskUse = options.allowDiskUse;
}
if (options.hint) {
command.hint = options.hint;
}
if (options.let) {
command.let = options.let;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (options.comment !== undefined) {
command.comment = options.comment;
}
command.cursor = options.cursor || {};
if (options.batchSize && !this.hasWriteStage) {
command.cursor.batchSize = options.batchSize;
}
return await super.executeCommand(
server,
session,
command,
timeoutContext,
this.explain ? ExplainedCursorResponse : CursorResponse
);
}
}
defineAspects(AggregateOperation, [
Aspect.READ_OPERATION,
Aspect.RETRYABLE,
Aspect.EXPLAINABLE,
Aspect.CURSOR_CREATING
]);
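
Usage sketch (illustrative only, not part of the vendored file above): how the public AggregateOptions surface is typically exercised through Collection#aggregate, which drives AggregateOperation. The connection string, database, collection and field names are invented for the example.

import { MongoClient } from 'mongodb';

async function example() {
  const client = new MongoClient('mongodb://localhost:27017');
  const orders = client.db('shop').collection('orders');
  // allowDiskUse, batchSize, let and hint all flow into the aggregate command
  // assembled by AggregateOperation.execute above.
  const cursor = orders.aggregate(
    [
      { $match: { $expr: { $eq: ['$status', '$$targetStatus'] } } },
      { $group: { _id: '$customerId', total: { $sum: '$amount' } } }
    ],
    {
      allowDiskUse: true,
      batchSize: 100,
      let: { targetStatus: 'shipped' }, // requires MongoDB 5.0
      hint: { status: 1 }
    }
  );
  for await (const doc of cursor) {
    console.log(doc);
  }
  await client.close();
}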

View file

@ -0,0 +1,64 @@
import type {
AnyBulkWriteOperation,
BulkOperationBase,
BulkWriteOptions,
BulkWriteResult
} from '../bulk/common';
import type { Collection } from '../collection';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { AbstractOperation, Aspect, defineAspects } from './operation';
/** @internal */
export class BulkWriteOperation extends AbstractOperation<BulkWriteResult> {
override options: BulkWriteOptions;
collection: Collection;
operations: ReadonlyArray<AnyBulkWriteOperation>;
constructor(
collection: Collection,
operations: ReadonlyArray<AnyBulkWriteOperation>,
options: BulkWriteOptions
) {
super(options);
this.options = options;
this.collection = collection;
this.operations = operations;
}
override get commandName() {
return 'bulkWrite' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<BulkWriteResult> {
const coll = this.collection;
const operations = this.operations;
const options = {
...this.options,
...this.bsonOptions,
readPreference: this.readPreference,
timeoutContext
};
// Create the bulk operation
const bulk: BulkOperationBase =
options.ordered === false
? coll.initializeUnorderedBulkOp(options)
: coll.initializeOrderedBulkOp(options);
// for each op go through and add to the bulk
for (let i = 0; i < operations.length; i++) {
bulk.raw(operations[i]);
}
// Execute the bulk
return await bulk.execute({ ...options, session });
}
}
defineAspects(BulkWriteOperation, [Aspect.WRITE_OPERATION]);
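
Usage sketch (illustrative only): the operation above backs Collection#bulkWrite, which takes an array of AnyBulkWriteOperation documents. Collection and field names are invented.

import { MongoClient } from 'mongodb';

async function example() {
  const client = new MongoClient('mongodb://localhost:27017');
  const users = client.db('app').collection('users');
  // ordered: false selects initializeUnorderedBulkOp in BulkWriteOperation.execute;
  // each element is fed through bulk.raw() before the bulk is executed.
  const result = await users.bulkWrite(
    [
      { insertOne: { document: { name: 'Ada', active: true } } },
      { updateOne: { filter: { name: 'Ada' }, update: { $set: { active: true } } } },
      { deleteMany: { filter: { active: false } } }
    ],
    { ordered: false }
  );
  console.log(result.insertedCount, result.modifiedCount, result.deletedCount);
  await client.close();
}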

View file

@ -0,0 +1,115 @@
import { MongoClientBulkWriteExecutionError, ServerType } from '../../beta';
import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses';
import type { Server } from '../../sdam/server';
import type { ClientSession } from '../../sessions';
import { type TimeoutContext } from '../../timeout';
import { MongoDBNamespace } from '../../utils';
import { CommandOperation } from '../command';
import { Aspect, defineAspects } from '../operation';
import { type ClientBulkWriteCommandBuilder } from './command_builder';
import { type ClientBulkWriteOptions } from './common';
/**
* Executes a single client bulk write operation within a potential batch.
* @internal
*/
export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCursorResponse> {
commandBuilder: ClientBulkWriteCommandBuilder;
override options: ClientBulkWriteOptions;
override get commandName() {
return 'bulkWrite' as const;
}
constructor(commandBuilder: ClientBulkWriteCommandBuilder, options: ClientBulkWriteOptions) {
super(undefined, options);
this.commandBuilder = commandBuilder;
this.options = options;
this.ns = new MongoDBNamespace('admin', '$cmd');
}
override resetBatch(): boolean {
return this.commandBuilder.resetBatch();
}
override get canRetryWrite(): boolean {
return this.commandBuilder.isBatchRetryable;
}
/**
* Execute the command. Superclass will handle write concern, etc.
* @param server - The server.
* @param session - The session.
* @returns The response.
*/
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<ClientBulkWriteCursorResponse> {
let command;
if (server.description.type === ServerType.LoadBalancer) {
if (session) {
let connection;
if (!session.pinnedConnection) {
// Checkout a connection to build the command.
connection = await server.pool.checkOut({ timeoutContext });
          // Pin the connection to the session so it gets used to execute the command and we do not
// perform a double check-in/check-out.
session.pin(connection);
} else {
connection = session.pinnedConnection;
}
command = this.commandBuilder.buildBatch(
connection.hello?.maxMessageSizeBytes,
connection.hello?.maxWriteBatchSize,
connection.hello?.maxBsonObjectSize
);
} else {
throw new MongoClientBulkWriteExecutionError(
'Session provided to the client bulk write operation must be present.'
);
}
} else {
// At this point we have a server and the auto connect code has already
// run in executeOperation, so the server description will be populated.
// We can use that to build the command.
if (
!server.description.maxWriteBatchSize ||
!server.description.maxMessageSizeBytes ||
!server.description.maxBsonObjectSize
) {
throw new MongoClientBulkWriteExecutionError(
          'In order to execute a client bulk write, maxWriteBatchSize, maxMessageSizeBytes and maxBsonObjectSize must all be provided by the server hello response.'
);
}
command = this.commandBuilder.buildBatch(
server.description.maxMessageSizeBytes,
server.description.maxWriteBatchSize,
server.description.maxBsonObjectSize
);
}
// Check after the batch is built if we cannot retry it and override the option.
if (!this.canRetryWrite) {
this.options.willRetryWrite = false;
}
return await super.executeCommand(
server,
session,
command,
timeoutContext,
ClientBulkWriteCursorResponse
);
}
}
// Skipping the collation as it goes on the individual ops.
defineAspects(ClientBulkWriteOperation, [
Aspect.WRITE_OPERATION,
Aspect.SKIP_COLLATION,
Aspect.CURSOR_CREATING,
Aspect.RETRYABLE,
Aspect.COMMAND_BATCHING
]);

View file

@ -0,0 +1,469 @@
import { BSON, type Document } from '../../bson';
import { DocumentSequence } from '../../cmap/commands';
import { MongoAPIError, MongoInvalidArgumentError } from '../../error';
import { type PkFactory } from '../../mongo_client';
import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
import { DEFAULT_PK_FACTORY, hasAtomicOperators } from '../../utils';
import { type CollationOptions } from '../command';
import { type Hint } from '../operation';
import type {
AnyClientBulkWriteModel,
ClientBulkWriteOptions,
ClientDeleteManyModel,
ClientDeleteOneModel,
ClientInsertOneModel,
ClientReplaceOneModel,
ClientUpdateManyModel,
ClientUpdateOneModel
} from './common';
/** @internal */
export interface ClientBulkWriteCommand {
bulkWrite: 1;
errorsOnly: boolean;
ordered: boolean;
ops: DocumentSequence;
nsInfo: DocumentSequence;
bypassDocumentValidation?: boolean;
let?: Document;
comment?: any;
}
/**
* The bytes overhead for the extra fields added post command generation.
*/
const MESSAGE_OVERHEAD_BYTES = 1000;
/** @internal */
export class ClientBulkWriteCommandBuilder {
models: ReadonlyArray<AnyClientBulkWriteModel<Document>>;
options: ClientBulkWriteOptions;
pkFactory: PkFactory;
/** The current index in the models array that is being processed. */
currentModelIndex: number;
/** The model index that the builder was on when it finished the previous batch. Used for resets when retrying. */
previousModelIndex: number;
/** The last array of operations that were created. Used by the results merger for indexing results. */
lastOperations: Document[];
/** Returns true if the current batch being created has no multi-updates. */
isBatchRetryable: boolean;
/**
* Create the command builder.
* @param models - The client write models.
*/
constructor(
models: ReadonlyArray<AnyClientBulkWriteModel<Document>>,
options: ClientBulkWriteOptions,
pkFactory?: PkFactory
) {
this.models = models;
this.options = options;
this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
this.currentModelIndex = 0;
this.previousModelIndex = 0;
this.lastOperations = [];
this.isBatchRetryable = true;
}
/**
* Gets the errorsOnly value for the command, which is the inverse of the
* user provided verboseResults option. Defaults to true.
*/
get errorsOnly(): boolean {
if ('verboseResults' in this.options) {
return !this.options.verboseResults;
}
return true;
}
/**
* Determines if there is another batch to process.
* @returns True if not all batches have been built.
*/
hasNextBatch(): boolean {
return this.currentModelIndex < this.models.length;
}
/**
* When we need to retry a command we need to set the current
* model index back to its previous value.
*/
resetBatch(): boolean {
this.currentModelIndex = this.previousModelIndex;
return true;
}
/**
* Build a single batch of a client bulk write command.
* @param maxMessageSizeBytes - The max message size in bytes.
   * @param maxWriteBatchSize - The max write batch size.
   * @param maxBsonObjectSize - The max BSON object size in bytes.
   * @returns The client bulk write command.
*/
buildBatch(
maxMessageSizeBytes: number,
maxWriteBatchSize: number,
maxBsonObjectSize: number
): ClientBulkWriteCommand {
// We start by assuming the batch has no multi-updates, so it is retryable
// until we find them.
this.isBatchRetryable = true;
let commandLength = 0;
let currentNamespaceIndex = 0;
const command: ClientBulkWriteCommand = this.baseCommand();
const namespaces = new Map<string, number>();
// In the case of retries we need to mark where we started this batch.
this.previousModelIndex = this.currentModelIndex;
while (this.currentModelIndex < this.models.length) {
const model = this.models[this.currentModelIndex];
const ns = model.namespace;
const nsIndex = namespaces.get(ns);
// Multi updates are not retryable.
if (model.name === 'deleteMany' || model.name === 'updateMany') {
this.isBatchRetryable = false;
}
if (nsIndex != null) {
// Build the operation and serialize it to get the bytes buffer.
const operation = buildOperation(model, nsIndex, this.pkFactory);
let operationBuffer;
try {
operationBuffer = BSON.serialize(operation);
} catch (cause) {
throw new MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
}
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation buffer can fit in the command. If it can,
// then add the operation to the document sequence and increment the
// current length as long as the ops don't exceed the maxWriteBatchSize.
if (
commandLength + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize
) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
// Increment the builder's current model index.
this.currentModelIndex++;
} else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
} else {
// The namespace is not already in the nsInfo so we will set it in the map, and
// construct our nsInfo and ops documents and buffers.
namespaces.set(ns, currentNamespaceIndex);
const nsInfo = { ns: ns };
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
let nsInfoBuffer;
let operationBuffer;
try {
nsInfoBuffer = BSON.serialize(nsInfo);
operationBuffer = BSON.serialize(operation);
} catch (cause) {
throw new MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
}
validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation and nsInfo buffers can fit in the command. If they
// can, then add the operation and nsInfo to their respective document
// sequences and increment the current length as long as the ops don't exceed
// the maxWriteBatchSize.
if (
commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize
) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength =
MESSAGE_OVERHEAD_BYTES +
command.nsInfo.push(nsInfo, nsInfoBuffer) +
command.ops.push(operation, operationBuffer);
// We've added a new namespace, increment the namespace index.
currentNamespaceIndex++;
// Increment the builder's current model index.
this.currentModelIndex++;
} else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
}
// Set the last operations and return the command.
this.lastOperations = command.ops.documents;
return command;
}
private baseCommand(): ClientBulkWriteCommand {
const command: ClientBulkWriteCommand = {
bulkWrite: 1,
errorsOnly: this.errorsOnly,
ordered: this.options.ordered ?? true,
ops: new DocumentSequence('ops'),
nsInfo: new DocumentSequence('nsInfo')
};
// Add bypassDocumentValidation if it was present in the options.
if (this.options.bypassDocumentValidation != null) {
command.bypassDocumentValidation = this.options.bypassDocumentValidation;
}
// Add let if it was present in the options.
if (this.options.let) {
command.let = this.options.let;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (this.options.comment !== undefined) {
command.comment = this.options.comment;
}
return command;
}
}
function validateBufferSize(name: string, buffer: Uint8Array, maxBsonObjectSize: number) {
if (buffer.length > maxBsonObjectSize) {
throw new MongoInvalidArgumentError(
`Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`
);
}
}
/** @internal */
interface ClientInsertOperation {
insert: number;
document: OptionalId<Document>;
}
/**
* Build the insert one operation.
* @param model - The insert one model.
* @param index - The namespace index.
* @returns the operation.
*/
export const buildInsertOneOperation = (
model: ClientInsertOneModel<Document>,
index: number,
pkFactory: PkFactory
): ClientInsertOperation => {
const document: ClientInsertOperation = {
insert: index,
document: model.document
};
document.document._id = model.document._id ?? pkFactory.createPk();
return document;
};
/** @internal */
export interface ClientDeleteOperation {
delete: number;
multi: boolean;
filter: Filter<Document>;
hint?: Hint;
collation?: CollationOptions;
}
/**
* Build the delete one operation.
 * @param model - The delete one model.
* @param index - The namespace index.
* @returns the operation.
*/
export const buildDeleteOneOperation = (
model: ClientDeleteOneModel<Document>,
index: number
): Document => {
return createDeleteOperation(model, index, false);
};
/**
* Build the delete many operation.
* @param model - The delete many model.
* @param index - The namespace index.
* @returns the operation.
*/
export const buildDeleteManyOperation = (
model: ClientDeleteManyModel<Document>,
index: number
): Document => {
return createDeleteOperation(model, index, true);
};
/**
* Creates a delete operation based on the parameters.
*/
function createDeleteOperation(
model: ClientDeleteOneModel<Document> | ClientDeleteManyModel<Document>,
index: number,
multi: boolean
): ClientDeleteOperation {
const document: ClientDeleteOperation = {
delete: index,
multi: multi,
filter: model.filter
};
if (model.hint) {
document.hint = model.hint;
}
if (model.collation) {
document.collation = model.collation;
}
return document;
}
/** @internal */
export interface ClientUpdateOperation {
update: number;
multi: boolean;
filter: Filter<Document>;
updateMods: UpdateFilter<Document> | Document[];
hint?: Hint;
upsert?: boolean;
arrayFilters?: Document[];
collation?: CollationOptions;
}
/**
* Build the update one operation.
* @param model - The update one model.
* @param index - The namespace index.
* @returns the operation.
*/
export const buildUpdateOneOperation = (
model: ClientUpdateOneModel<Document>,
index: number
): ClientUpdateOperation => {
return createUpdateOperation(model, index, false);
};
/**
* Build the update many operation.
* @param model - The update many model.
* @param index - The namespace index.
* @returns the operation.
*/
export const buildUpdateManyOperation = (
model: ClientUpdateManyModel<Document>,
index: number
): ClientUpdateOperation => {
return createUpdateOperation(model, index, true);
};
/**
* Validate the update document.
* @param update - The update document.
*/
function validateUpdate(update: Document) {
if (!hasAtomicOperators(update)) {
throw new MongoAPIError(
'Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.'
);
}
}
/**
 * Creates an update operation based on the parameters.
*/
function createUpdateOperation(
model: ClientUpdateOneModel<Document> | ClientUpdateManyModel<Document>,
index: number,
multi: boolean
): ClientUpdateOperation {
// Update documents provided in UpdateOne and UpdateMany write models are
// required only to contain atomic modifiers (i.e. keys that start with "$").
// Drivers MUST throw an error if an update document is empty or if the
// document's first key does not start with "$".
validateUpdate(model.update);
const document: ClientUpdateOperation = {
update: index,
multi: multi,
filter: model.filter,
updateMods: model.update
};
if (model.hint) {
document.hint = model.hint;
}
if (model.upsert) {
document.upsert = model.upsert;
}
if (model.arrayFilters) {
document.arrayFilters = model.arrayFilters;
}
if (model.collation) {
document.collation = model.collation;
}
return document;
}
/** @internal */
export interface ClientReplaceOneOperation {
update: number;
multi: boolean;
filter: Filter<Document>;
updateMods: WithoutId<Document>;
hint?: Hint;
upsert?: boolean;
collation?: CollationOptions;
}
/**
* Build the replace one operation.
* @param model - The replace one model.
* @param index - The namespace index.
* @returns the operation.
*/
export const buildReplaceOneOperation = (
model: ClientReplaceOneModel<Document>,
index: number
): ClientReplaceOneOperation => {
if (hasAtomicOperators(model.replacement)) {
throw new MongoAPIError(
'Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.'
);
}
const document: ClientReplaceOneOperation = {
update: index,
multi: false,
filter: model.filter,
updateMods: model.replacement
};
if (model.hint) {
document.hint = model.hint;
}
if (model.upsert) {
document.upsert = model.upsert;
}
if (model.collation) {
document.collation = model.collation;
}
return document;
};
/** @internal */
export function buildOperation(
model: AnyClientBulkWriteModel<Document>,
index: number,
pkFactory: PkFactory
): Document {
switch (model.name) {
case 'insertOne':
return buildInsertOneOperation(model, index, pkFactory);
case 'deleteOne':
return buildDeleteOneOperation(model, index);
case 'deleteMany':
return buildDeleteManyOperation(model, index);
case 'updateOne':
return buildUpdateOneOperation(model, index);
case 'updateMany':
return buildUpdateManyOperation(model, index);
case 'replaceOne':
return buildReplaceOneOperation(model, index);
}
}
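
Rough sketch of what the exported builders above produce (the namespace, documents and import path are assumptions made for illustration; the result shapes in the comments are approximate).

import { ObjectId } from 'mongodb';
import { buildInsertOneOperation, buildDeleteManyOperation } from './command_builder'; // path assumed

const insertOp = buildInsertOneOperation(
  { name: 'insertOne', namespace: 'db.books', document: { title: 'Practical MongoDB Aggregations' } },
  0, // namespace index into the nsInfo document sequence
  { createPk: () => new ObjectId() } // pk factory fills in a missing _id
);
// insertOp is roughly { insert: 0, document: { title: '...', _id: ObjectId('...') } }

const deleteOp = buildDeleteManyOperation(
  { name: 'deleteMany', namespace: 'db.books', filter: { outOfPrint: true } },
  0
);
// deleteOp is roughly { delete: 0, multi: true, filter: { outOfPrint: true } }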

View file

@ -0,0 +1,271 @@
import { type Document } from '../../bson';
import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
import type { CollationOptions, CommandOperationOptions } from '../../operations/command';
import type { Hint } from '../../operations/operation';
/** @public */
export interface ClientBulkWriteOptions extends CommandOperationOptions {
/**
   * If true, when a write fails, don't execute the remaining writes.
   * If false, continue with the remaining writes when one fails.
   * @defaultValue `true` - writes are ordered by default
*/
ordered?: boolean;
/**
* Allow driver to bypass schema validation.
* @defaultValue `false` - documents will be validated by default
**/
bypassDocumentValidation?: boolean;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
/**
* Whether detailed results for each successful operation should be included in the returned
* BulkWriteResult.
*/
verboseResults?: boolean;
}
/** @public */
export interface ClientWriteModel {
/**
* The namespace for the write.
*
* A namespace is a combination of the database name and the name of the collection: `<database-name>.<collection>`.
* All documents belong to a namespace.
*
* @see https://www.mongodb.com/docs/manual/reference/limits/#std-label-faq-dev-namespace
*/
namespace: string;
}
/** @public */
export interface ClientInsertOneModel<TSchema> extends ClientWriteModel {
name: 'insertOne';
/** The document to insert. */
document: OptionalId<TSchema>;
}
/** @public */
export interface ClientDeleteOneModel<TSchema> extends ClientWriteModel {
name: 'deleteOne';
/**
* The filter used to determine if a document should be deleted.
* For a deleteOne operation, the first match is removed.
*/
filter: Filter<TSchema>;
/** Specifies a collation. */
collation?: CollationOptions;
/** The index to use. If specified, then the query system will only consider plans using the hinted index. */
hint?: Hint;
}
/** @public */
export interface ClientDeleteManyModel<TSchema> extends ClientWriteModel {
name: 'deleteMany';
/**
* The filter used to determine if a document should be deleted.
* For a deleteMany operation, all matches are removed.
*/
filter: Filter<TSchema>;
/** Specifies a collation. */
collation?: CollationOptions;
/** The index to use. If specified, then the query system will only consider plans using the hinted index. */
hint?: Hint;
}
/** @public */
export interface ClientReplaceOneModel<TSchema> extends ClientWriteModel {
name: 'replaceOne';
/**
* The filter used to determine if a document should be replaced.
* For a replaceOne operation, the first match is replaced.
*/
filter: Filter<TSchema>;
/** The document with which to replace the matched document. */
replacement: WithoutId<TSchema>;
/** Specifies a collation. */
collation?: CollationOptions;
/** The index to use. If specified, then the query system will only consider plans using the hinted index. */
hint?: Hint;
/** When true, creates a new document if no document matches the query. */
upsert?: boolean;
}
/** @public */
export interface ClientUpdateOneModel<TSchema> extends ClientWriteModel {
name: 'updateOne';
/**
* The filter used to determine if a document should be updated.
* For an updateOne operation, the first match is updated.
*/
filter: Filter<TSchema>;
/**
* The modifications to apply. The value can be either:
* UpdateFilter<Document> - A document that contains update operator expressions,
* Document[] - an aggregation pipeline.
*/
update: UpdateFilter<TSchema> | Document[];
/** A set of filters specifying to which array elements an update should apply. */
arrayFilters?: Document[];
/** Specifies a collation. */
collation?: CollationOptions;
/** The index to use. If specified, then the query system will only consider plans using the hinted index. */
hint?: Hint;
/** When true, creates a new document if no document matches the query. */
upsert?: boolean;
}
/** @public */
export interface ClientUpdateManyModel<TSchema> extends ClientWriteModel {
name: 'updateMany';
/**
* The filter used to determine if a document should be updated.
* For an updateMany operation, all matches are updated.
*/
filter: Filter<TSchema>;
/**
* The modifications to apply. The value can be either:
* UpdateFilter<Document> - A document that contains update operator expressions,
* Document[] - an aggregation pipeline.
*/
update: UpdateFilter<TSchema> | Document[];
/** A set of filters specifying to which array elements an update should apply. */
arrayFilters?: Document[];
/** Specifies a collation. */
collation?: CollationOptions;
/** The index to use. If specified, then the query system will only consider plans using the hinted index. */
hint?: Hint;
/** When true, creates a new document if no document matches the query. */
upsert?: boolean;
}
/**
* Used to represent any of the client bulk write models that can be passed as an array
* to MongoClient#bulkWrite.
* @public
*/
export type AnyClientBulkWriteModel<TSchema extends Document> =
| ClientInsertOneModel<TSchema>
| ClientReplaceOneModel<TSchema>
| ClientUpdateOneModel<TSchema>
| ClientUpdateManyModel<TSchema>
| ClientDeleteOneModel<TSchema>
| ClientDeleteManyModel<TSchema>;
/**
* A mapping of namespace strings to collections schemas.
* @public
*
* @example
* ```ts
* type MongoDBSchemas = {
* 'db.books': Book;
* 'db.authors': Author;
* }
*
* const model: ClientBulkWriteModel<MongoDBSchemas> = {
 * namespace: 'db.books',
* name: 'insertOne',
* document: { title: 'Practical MongoDB Aggregations', authorName: 3 } // error `authorName` cannot be number
* };
* ```
*
* The type of the `namespace` field narrows other parts of the BulkWriteModel to use the correct schema for type assertions.
*
*/
export type ClientBulkWriteModel<
SchemaMap extends Record<string, Document> = Record<string, Document>
> = {
[Namespace in keyof SchemaMap]: AnyClientBulkWriteModel<SchemaMap[Namespace]> & {
namespace: Namespace;
};
}[keyof SchemaMap];
/** @public */
export interface ClientBulkWriteResult {
/**
* Whether the bulk write was acknowledged.
*/
readonly acknowledged: boolean;
/**
* The total number of documents inserted across all insert operations.
*/
readonly insertedCount: number;
/**
* The total number of documents upserted across all update operations.
*/
readonly upsertedCount: number;
/**
* The total number of documents matched across all update operations.
*/
readonly matchedCount: number;
/**
* The total number of documents modified across all update operations.
*/
readonly modifiedCount: number;
/**
* The total number of documents deleted across all delete operations.
*/
readonly deletedCount: number;
/**
* The results of each individual insert operation that was successfully performed.
*/
readonly insertResults?: ReadonlyMap<number, ClientInsertOneResult>;
/**
* The results of each individual update operation that was successfully performed.
*/
readonly updateResults?: ReadonlyMap<number, ClientUpdateResult>;
/**
* The results of each individual delete operation that was successfully performed.
*/
readonly deleteResults?: ReadonlyMap<number, ClientDeleteResult>;
}
/** @public */
export interface ClientBulkWriteError {
code: number;
message: string;
}
/** @public */
export interface ClientInsertOneResult {
/**
* The _id of the inserted document.
*/
insertedId: any;
}
/** @public */
export interface ClientUpdateResult {
/**
* The number of documents that matched the filter.
*/
matchedCount: number;
/**
* The number of documents that were modified.
*/
modifiedCount: number;
/**
* The _id field of the upserted document if an upsert occurred.
*
* It MUST be possible to discern between a BSON Null upserted ID value and this field being
* unset. If necessary, drivers MAY add a didUpsert boolean field to differentiate between
* these two cases.
*/
upsertedId?: any;
/**
* Determines if the upsert did include an _id, which includes the case of the _id being null.
*/
didUpsert: boolean;
}
/** @public */
export interface ClientDeleteResult {
/**
* The number of documents that were deleted.
*/
deletedCount: number;
}
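
Usage sketch (illustrative only): the models and result types above form the public surface of MongoClient#bulkWrite, which targets the server-side bulkWrite command (MongoDB 8.0+). The schema map, namespaces and documents are invented, and the sketch assumes the driver build exports these @public types.

import { MongoClient, type ClientBulkWriteModel } from 'mongodb';

interface Book { title: string; authorName: string }
interface Author { name: string }

type MongoDBSchemas = { 'db.books': Book; 'db.authors': Author };

async function example() {
  const client = new MongoClient('mongodb://localhost:27017');
  const models: ClientBulkWriteModel<MongoDBSchemas>[] = [
    { namespace: 'db.authors', name: 'insertOne', document: { name: 'Paul Done' } },
    { namespace: 'db.books', name: 'insertOne', document: { title: 'Practical MongoDB Aggregations', authorName: 'Paul Done' } },
    { namespace: 'db.books', name: 'deleteMany', filter: { authorName: 'Anonymous' } }
  ];
  // verboseResults: true populates the per-index insert/update/delete result maps;
  // the default (false) sets errorsOnly: true on the wire command.
  const result = await client.bulkWrite(models, { ordered: false, verboseResults: true });
  console.log(result.insertedCount, result.deletedCount, result.insertResults);
  await client.close();
}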

View file

@ -0,0 +1,149 @@
import { type Document } from '../../bson';
import { CursorTimeoutContext, CursorTimeoutMode } from '../../cursor/abstract_cursor';
import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
import {
MongoClientBulkWriteError,
MongoClientBulkWriteExecutionError,
MongoInvalidArgumentError,
MongoServerError
} from '../../error';
import { type MongoClient } from '../../mongo_client';
import { TimeoutContext } from '../../timeout';
import { resolveTimeoutOptions } from '../../utils';
import { WriteConcern } from '../../write_concern';
import { executeOperation } from '../execute_operation';
import { ClientBulkWriteOperation } from './client_bulk_write';
import { ClientBulkWriteCommandBuilder } from './command_builder';
import {
type AnyClientBulkWriteModel,
type ClientBulkWriteOptions,
type ClientBulkWriteResult
} from './common';
import { ClientBulkWriteResultsMerger } from './results_merger';
/**
* Responsible for executing a client bulk write.
* @internal
*/
export class ClientBulkWriteExecutor {
private readonly client: MongoClient;
private readonly options: ClientBulkWriteOptions;
private readonly operations: ReadonlyArray<AnyClientBulkWriteModel<Document>>;
/**
* Instantiate the executor.
* @param client - The mongo client.
* @param operations - The user supplied bulk write models.
* @param options - The bulk write options.
*/
constructor(
client: MongoClient,
operations: ReadonlyArray<AnyClientBulkWriteModel<Document>>,
options?: ClientBulkWriteOptions
) {
if (operations.length === 0) {
throw new MongoClientBulkWriteExecutionError('No client bulk write models were provided.');
}
this.client = client;
this.operations = operations;
this.options = {
ordered: true,
bypassDocumentValidation: false,
verboseResults: false,
...options
};
// If no write concern was provided, we inherit one from the client.
if (!this.options.writeConcern) {
this.options.writeConcern = WriteConcern.fromOptions(this.client.s.options);
}
if (this.options.writeConcern?.w === 0) {
if (this.options.verboseResults) {
throw new MongoInvalidArgumentError(
'Cannot request unacknowledged write concern and verbose results'
);
}
if (this.options.ordered) {
throw new MongoInvalidArgumentError(
'Cannot request unacknowledged write concern and ordered writes'
);
}
}
}
/**
* Execute the client bulk write. Will split commands into batches and exhaust the cursors
* for each, then merge the results into one.
* @returns The result.
*/
async execute(): Promise<ClientBulkWriteResult> {
    // The command builder will take the user provided models and potentially split the batch
// into multiple commands due to size.
const pkFactory = this.client.s.options.pkFactory;
const commandBuilder = new ClientBulkWriteCommandBuilder(
this.operations,
this.options,
pkFactory
);
// Unacknowledged writes need to execute all batches and return { ok: 1}
const resolvedOptions = resolveTimeoutOptions(this.client, this.options);
const context = TimeoutContext.create(resolvedOptions);
if (this.options.writeConcern?.w === 0) {
while (commandBuilder.hasNextBatch()) {
const operation = new ClientBulkWriteOperation(commandBuilder, this.options);
await executeOperation(this.client, operation, context);
}
return ClientBulkWriteResultsMerger.unacknowledged();
} else {
const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
      // For each command we will create and exhaust a cursor for the results.
while (commandBuilder.hasNextBatch()) {
const cursorContext = new CursorTimeoutContext(context, Symbol());
const options = {
...this.options,
timeoutContext: cursorContext,
...(resolvedOptions.timeoutMS != null && { timeoutMode: CursorTimeoutMode.LIFETIME })
};
const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, options);
try {
await resultsMerger.merge(cursor);
} catch (error) {
// Write concern errors are recorded in the writeConcernErrors field on MongoClientBulkWriteError.
// When a write concern error is encountered, it should not terminate execution of the bulk write
// for either ordered or unordered bulk writes. However, drivers MUST throw an exception at the end
// of execution if any write concern errors were observed.
if (error instanceof MongoServerError && !(error instanceof MongoClientBulkWriteError)) {
// Server side errors need to be wrapped inside a MongoClientBulkWriteError, where the root
// cause is the error property and a partial result is to be included.
const bulkWriteError = new MongoClientBulkWriteError({
message: 'Mongo client bulk write encountered an error during execution'
});
bulkWriteError.cause = error;
bulkWriteError.partialResult = resultsMerger.bulkWriteResult;
throw bulkWriteError;
} else {
// Client side errors are just thrown.
throw error;
}
}
}
// If we have write concern errors or unordered write errors at the end we throw.
if (resultsMerger.writeConcernErrors.length > 0 || resultsMerger.writeErrors.size > 0) {
const error = new MongoClientBulkWriteError({
message: 'Mongo client bulk write encountered errors during execution.'
});
error.writeConcernErrors = resultsMerger.writeConcernErrors;
error.writeErrors = resultsMerger.writeErrors;
error.partialResult = resultsMerger.bulkWriteResult;
throw error;
}
return resultsMerger.bulkWriteResult;
}
}
}
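
Error-handling sketch (illustrative only): how the MongoClientBulkWriteError thrown by the executor above is typically inspected. The namespace and documents are invented; the duplicate _id is just a convenient way to force a write error.

import { MongoClient, MongoClientBulkWriteError } from 'mongodb';

async function example(client: MongoClient) {
  try {
    await client.bulkWrite([
      { namespace: 'db.books', name: 'insertOne', document: { _id: 1, title: 'first' } },
      { namespace: 'db.books', name: 'insertOne', document: { _id: 1, title: 'duplicate _id' } }
    ]);
  } catch (error) {
    if (error instanceof MongoClientBulkWriteError) {
      // The executor attaches per-index write errors, write concern errors and a partial result.
      console.log(error.writeErrors, error.writeConcernErrors, error.partialResult);
    } else {
      throw error; // client-side errors are rethrown as-is
    }
  }
}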

View file

@ -0,0 +1,260 @@
import { MongoWriteConcernError } from '../..';
import { type Document } from '../../bson';
import { type ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
import { MongoClientBulkWriteError } from '../../error';
import {
type ClientBulkWriteError,
type ClientBulkWriteOptions,
type ClientBulkWriteResult,
type ClientDeleteResult,
type ClientInsertOneResult,
type ClientUpdateResult
} from './common';
/**
 * The result of an unacknowledged bulk write is always the same.
*/
const UNACKNOWLEDGED = {
acknowledged: false,
insertedCount: 0,
upsertedCount: 0,
matchedCount: 0,
modifiedCount: 0,
deletedCount: 0,
insertResults: undefined,
updateResults: undefined,
deleteResults: undefined
};
interface ClientBulkWriteResultAccumulation {
/**
* Whether the bulk write was acknowledged.
*/
acknowledged: boolean;
/**
* The total number of documents inserted across all insert operations.
*/
insertedCount: number;
/**
* The total number of documents upserted across all update operations.
*/
upsertedCount: number;
/**
* The total number of documents matched across all update operations.
*/
matchedCount: number;
/**
* The total number of documents modified across all update operations.
*/
modifiedCount: number;
/**
* The total number of documents deleted across all delete operations.
*/
deletedCount: number;
/**
* The results of each individual insert operation that was successfully performed.
*/
insertResults?: Map<number, ClientInsertOneResult>;
/**
* The results of each individual update operation that was successfully performed.
*/
updateResults?: Map<number, ClientUpdateResult>;
/**
* The results of each individual delete operation that was successfully performed.
*/
deleteResults?: Map<number, ClientDeleteResult>;
}
/**
* Merges client bulk write cursor responses together into a single result.
* @internal
*/
export class ClientBulkWriteResultsMerger {
private result: ClientBulkWriteResultAccumulation;
private options: ClientBulkWriteOptions;
private currentBatchOffset: number;
writeConcernErrors: Document[];
writeErrors: Map<number, ClientBulkWriteError>;
/**
* @returns The standard unacknowledged bulk write result.
*/
static unacknowledged(): ClientBulkWriteResult {
return UNACKNOWLEDGED;
}
/**
* Instantiate the merger.
* @param options - The options.
*/
constructor(options: ClientBulkWriteOptions) {
this.options = options;
this.currentBatchOffset = 0;
this.writeConcernErrors = [];
this.writeErrors = new Map();
this.result = {
acknowledged: true,
insertedCount: 0,
upsertedCount: 0,
matchedCount: 0,
modifiedCount: 0,
deletedCount: 0,
insertResults: undefined,
updateResults: undefined,
deleteResults: undefined
};
if (options.verboseResults) {
this.result.insertResults = new Map<number, ClientInsertOneResult>();
this.result.updateResults = new Map<number, ClientUpdateResult>();
this.result.deleteResults = new Map<number, ClientDeleteResult>();
}
}
/**
* Get the bulk write result object.
*/
get bulkWriteResult(): ClientBulkWriteResult {
return {
acknowledged: this.result.acknowledged,
insertedCount: this.result.insertedCount,
upsertedCount: this.result.upsertedCount,
matchedCount: this.result.matchedCount,
modifiedCount: this.result.modifiedCount,
deletedCount: this.result.deletedCount,
insertResults: this.result.insertResults,
updateResults: this.result.updateResults,
deleteResults: this.result.deleteResults
};
}
/**
   * Merge the results in the cursor into the existing result.
   * @param cursor - The client bulk write cursor whose responses and documents are merged.
   * @returns The current result.
*/
async merge(cursor: ClientBulkWriteCursor): Promise<ClientBulkWriteResult> {
let writeConcernErrorResult;
try {
for await (const document of cursor) {
// Only add to maps if ok: 1
if (document.ok === 1) {
if (this.options.verboseResults) {
this.processDocument(cursor, document);
}
} else {
// If an individual write error is encountered during an ordered bulk write, drivers MUST
// record the error in writeErrors and immediately throw the exception. Otherwise, drivers
// MUST continue to iterate the results cursor and execute any further bulkWrite batches.
if (this.options.ordered) {
const error = new MongoClientBulkWriteError({
message: 'Mongo client ordered bulk write encountered a write error.'
});
error.writeErrors.set(document.idx + this.currentBatchOffset, {
code: document.code,
message: document.errmsg
});
error.partialResult = this.result;
throw error;
} else {
this.writeErrors.set(document.idx + this.currentBatchOffset, {
code: document.code,
message: document.errmsg
});
}
}
}
} catch (error) {
if (error instanceof MongoWriteConcernError) {
const result = error.result;
writeConcernErrorResult = {
insertedCount: result.nInserted,
upsertedCount: result.nUpserted,
matchedCount: result.nMatched,
modifiedCount: result.nModified,
deletedCount: result.nDeleted,
writeConcernError: result.writeConcernError
};
if (this.options.verboseResults && result.cursor.firstBatch) {
for (const document of result.cursor.firstBatch) {
if (document.ok === 1) {
this.processDocument(cursor, document);
}
}
}
} else {
throw error;
}
} finally {
// Update the counts from the cursor response.
if (cursor.response) {
const response = cursor.response;
this.incrementCounts(response);
}
// Increment the batch offset.
this.currentBatchOffset += cursor.operations.length;
}
// If we have write concern errors ensure they are added.
if (writeConcernErrorResult) {
const writeConcernError = writeConcernErrorResult.writeConcernError as Document;
this.incrementCounts(writeConcernErrorResult);
this.writeConcernErrors.push({
code: writeConcernError.code,
message: writeConcernError.errmsg
});
}
return this.result;
}
/**
* Process an individual document in the results.
* @param cursor - The cursor.
* @param document - The document to process.
*/
private processDocument(cursor: ClientBulkWriteCursor, document: Document) {
// Get the corresponding operation from the command.
const operation = cursor.operations[document.idx];
// Handle insert results.
if ('insert' in operation) {
this.result.insertResults?.set(document.idx + this.currentBatchOffset, {
insertedId: operation.document._id
});
}
// Handle update results.
if ('update' in operation) {
const result: ClientUpdateResult = {
matchedCount: document.n,
modifiedCount: document.nModified ?? 0,
// Check if the bulk did actually upsert.
didUpsert: document.upserted != null
};
if (document.upserted) {
result.upsertedId = document.upserted._id;
}
this.result.updateResults?.set(document.idx + this.currentBatchOffset, result);
}
// Handle delete results.
if ('delete' in operation) {
this.result.deleteResults?.set(document.idx + this.currentBatchOffset, {
deletedCount: document.n
});
}
}
/**
* Increment the result counts.
* @param document - The document with the results.
*/
private incrementCounts(document: Document) {
this.result.insertedCount += document.insertedCount;
this.result.upsertedCount += document.upsertedCount;
this.result.matchedCount += document.matchedCount;
this.result.modifiedCount += document.modifiedCount;
this.result.deletedCount += document.deletedCount;
}
}

View file

@ -0,0 +1,47 @@
import { Collection } from '../collection';
import type { Db } from '../db';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { AbstractOperation, type OperationOptions } from './operation';
export interface CollectionsOptions extends OperationOptions {
nameOnly?: boolean;
}
/** @internal */
export class CollectionsOperation extends AbstractOperation<Collection[]> {
override options: CollectionsOptions;
db: Db;
constructor(db: Db, options: CollectionsOptions) {
super(options);
this.options = options;
this.db = db;
}
override get commandName() {
return 'listCollections' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined
): Promise<Collection[]> {
// Let's get the collection names
const documents = await this.db
.listCollections(
{},
{ ...this.options, nameOnly: true, readPreference: this.readPreference, session }
)
.toArray();
const collections: Collection[] = [];
for (const { name } of documents) {
if (!name.includes('$')) {
// Filter collections removing any illegal ones
collections.push(new Collection(this.db, name, this.db.s.options));
}
}
// Return the collection objects
return collections;
}
}
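
Usage sketch (illustrative only): Db#collections is the public entry point that runs CollectionsOperation. The database name is invented.

import { MongoClient } from 'mongodb';

async function example() {
  const client = new MongoClient('mongodb://localhost:27017');
  const db = client.db('app');
  // Internally this lists collection names (nameOnly: true), skips names containing '$',
  // and wraps each remaining name in a Collection instance, as in execute() above.
  const collections = await db.collections();
  for (const collection of collections) {
    console.log(collection.collectionName);
  }
  await client.close();
}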

View file

@ -0,0 +1,181 @@
import type { BSONSerializeOptions, Document } from '../bson';
import { type MongoDBResponseConstructor } from '../cmap/wire_protocol/responses';
import { MongoInvalidArgumentError } from '../error';
import {
decorateWithExplain,
Explain,
type ExplainOptions,
validateExplainTimeoutOptions
} from '../explain';
import { ReadConcern } from '../read_concern';
import type { ReadPreference } from '../read_preference';
import type { Server } from '../sdam/server';
import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { commandSupportsReadConcern, maxWireVersion, MongoDBNamespace } from '../utils';
import { WriteConcern, type WriteConcernOptions } from '../write_concern';
import type { ReadConcernLike } from './../read_concern';
import { AbstractOperation, Aspect, type OperationOptions } from './operation';
/** @public */
export interface CollationOptions {
locale: string;
caseLevel?: boolean;
caseFirst?: string;
strength?: number;
numericOrdering?: boolean;
alternate?: string;
maxVariable?: string;
backwards?: boolean;
normalization?: boolean;
}
/** @public */
export interface CommandOperationOptions
extends OperationOptions,
WriteConcernOptions,
ExplainOptions {
/** Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported) */
readConcern?: ReadConcernLike;
/** Collation */
collation?: CollationOptions;
/**
* maxTimeMS is a server-side time limit in milliseconds for processing an operation.
*/
maxTimeMS?: number;
/**
* Comment to apply to the operation.
*
* In server versions pre-4.4, 'comment' must be string. A server
* error will be thrown if any other type is provided.
*
* In server versions 4.4 and above, 'comment' can be any valid BSON type.
*/
comment?: unknown;
/** Should retry failed writes */
retryWrites?: boolean;
// Admin command overrides.
dbName?: string;
authdb?: string;
noResponse?: boolean;
}
/** @internal */
export interface OperationParent {
s: { namespace: MongoDBNamespace };
readConcern?: ReadConcern;
writeConcern?: WriteConcern;
readPreference?: ReadPreference;
bsonOptions?: BSONSerializeOptions;
timeoutMS?: number;
}
/** @internal */
export abstract class CommandOperation<T> extends AbstractOperation<T> {
override options: CommandOperationOptions;
readConcern?: ReadConcern;
writeConcern?: WriteConcern;
explain?: Explain;
constructor(parent?: OperationParent, options?: CommandOperationOptions) {
super(options);
this.options = options ?? {};
// NOTE: this was explicitly added for the add/remove user operations, it's likely
// something we'd want to reconsider. Perhaps those commands can use `Admin`
// as a parent?
const dbNameOverride = options?.dbName || options?.authdb;
if (dbNameOverride) {
this.ns = new MongoDBNamespace(dbNameOverride, '$cmd');
} else {
this.ns = parent
? parent.s.namespace.withCollection('$cmd')
: new MongoDBNamespace('admin', '$cmd');
}
this.readConcern = ReadConcern.fromOptions(options);
this.writeConcern = WriteConcern.fromOptions(options);
if (this.hasAspect(Aspect.EXPLAINABLE)) {
this.explain = Explain.fromOptions(options);
if (this.explain) validateExplainTimeoutOptions(this.options, this.explain);
} else if (options?.explain != null) {
throw new MongoInvalidArgumentError(`Option "explain" is not supported on this command`);
}
}
override get canRetryWrite(): boolean {
if (this.hasAspect(Aspect.EXPLAINABLE)) {
return this.explain == null;
}
return super.canRetryWrite;
}
public async executeCommand<T extends MongoDBResponseConstructor>(
server: Server,
session: ClientSession | undefined,
cmd: Document,
timeoutContext: TimeoutContext,
responseType: T | undefined
): Promise<typeof responseType extends undefined ? Document : InstanceType<T>>;
public async executeCommand(
server: Server,
session: ClientSession | undefined,
cmd: Document,
timeoutContext: TimeoutContext
): Promise<Document>;
async executeCommand(
server: Server,
session: ClientSession | undefined,
cmd: Document,
timeoutContext: TimeoutContext,
responseType?: MongoDBResponseConstructor
): Promise<Document> {
this.server = server;
const options = {
...this.options,
...this.bsonOptions,
timeoutContext,
readPreference: this.readPreference,
session
};
const serverWireVersion = maxWireVersion(server);
const inTransaction = this.session && this.session.inTransaction();
if (this.readConcern && commandSupportsReadConcern(cmd) && !inTransaction) {
Object.assign(cmd, { readConcern: this.readConcern });
}
if (this.trySecondaryWrite && serverWireVersion < MIN_SECONDARY_WRITE_WIRE_VERSION) {
options.omitReadPreference = true;
}
if (this.writeConcern && this.hasAspect(Aspect.WRITE_OPERATION) && !inTransaction) {
WriteConcern.apply(cmd, this.writeConcern);
}
if (
options.collation &&
typeof options.collation === 'object' &&
!this.hasAspect(Aspect.SKIP_COLLATION)
) {
Object.assign(cmd, { collation: options.collation });
}
if (typeof options.maxTimeMS === 'number') {
cmd.maxTimeMS = options.maxTimeMS;
}
if (this.hasAspect(Aspect.EXPLAINABLE) && this.explain) {
cmd = decorateWithExplain(cmd, this.explain);
}
return await server.command(this.ns, cmd, options, responseType);
}
}
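
Sketch of a hypothetical subclass (not part of the driver) showing how an operation typically plugs into CommandOperation.executeCommand above, which layers readConcern, writeConcern, collation, maxTimeMS and explain onto the command before calling server.command. The relative import paths mirror the module's own imports; './command' as the file name for the class above is an assumption.

import type { Document } from '../bson';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { CommandOperation } from './command'; // assumed path to the class defined above
import { Aspect, defineAspects } from './operation';

/** Hypothetical example operation, for illustration only. */
class PingOperation extends CommandOperation<Document> {
  override get commandName() {
    return 'ping' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Document> {
    // The superclass merges in bsonOptions, readPreference and the session.
    return await super.executeCommand(server, session, { ping: 1 }, timeoutContext);
  }
}
defineAspects(PingOperation, [Aspect.READ_OPERATION]);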

View file

@ -0,0 +1,74 @@
import type { Document } from '../bson';
import type { Collection } from '../collection';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import type { MongoDBNamespace } from '../utils';
import { CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects } from './operation';
/** @public */
export interface CountOptions extends CommandOperationOptions {
/** The number of documents to skip. */
skip?: number;
  /** The maximum number of documents to count before aborting. */
limit?: number;
/**
* Number of milliseconds to wait before aborting the query.
*/
maxTimeMS?: number;
/** An index name hint for the query. */
hint?: string | Document;
}
/** @internal */
export class CountOperation extends CommandOperation<number> {
override options: CountOptions;
collectionName?: string;
query: Document;
constructor(namespace: MongoDBNamespace, filter: Document, options: CountOptions) {
super({ s: { namespace: namespace } } as unknown as Collection, options);
this.options = options;
this.collectionName = namespace.collection;
this.query = filter;
}
override get commandName() {
return 'count' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<number> {
const options = this.options;
const cmd: Document = {
count: this.collectionName,
query: this.query
};
if (typeof options.limit === 'number') {
cmd.limit = options.limit;
}
if (typeof options.skip === 'number') {
cmd.skip = options.skip;
}
if (options.hint != null) {
cmd.hint = options.hint;
}
if (typeof options.maxTimeMS === 'number') {
cmd.maxTimeMS = options.maxTimeMS;
}
const result = await super.executeCommand(server, session, cmd, timeoutContext);
return result ? result.n : 0;
}
}
defineAspects(CountOperation, [Aspect.READ_OPERATION, Aspect.RETRYABLE]);
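
Usage sketch (illustrative only): the deprecated Collection#count is the entry point that drives CountOperation, and CountOptions map directly onto the count command assembled in execute() above. Collection and field names are invented.

import { MongoClient } from 'mongodb';

async function example() {
  const client = new MongoClient('mongodb://localhost:27017');
  const orders = client.db('shop').collection('orders');
  // Produces roughly:
  // { count: 'orders', query: { status: 'shipped' }, limit: 1000, skip: 0, hint: { status: 1 }, maxTimeMS: 5000 }
  const n = await orders.count(
    { status: 'shipped' },
    { limit: 1000, skip: 0, hint: { status: 1 }, maxTimeMS: 5000 }
  );
  console.log(n);
  await client.close();
}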

View file

@ -0,0 +1,213 @@
import type { Document } from '../bson';
import {
MIN_SUPPORTED_QE_SERVER_VERSION,
MIN_SUPPORTED_QE_WIRE_VERSION
} from '../cmap/wire_protocol/constants';
import { Collection } from '../collection';
import type { Db } from '../db';
import { MongoCompatibilityError } from '../error';
import type { PkFactory } from '../mongo_client';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { CommandOperation, type CommandOperationOptions } from './command';
import { CreateIndexesOperation } from './indexes';
import { Aspect, defineAspects } from './operation';
const ILLEGAL_COMMAND_FIELDS = new Set([
'w',
'wtimeout',
'timeoutMS',
'j',
'fsync',
'autoIndexId',
'pkFactory',
'raw',
'readPreference',
'session',
'readConcern',
'writeConcern',
'fieldsAsRaw',
'useBigInt64',
'promoteLongs',
'promoteValues',
'promoteBuffers',
'bsonRegExp',
'serializeFunctions',
'ignoreUndefined',
'enableUtf8Validation'
]);
/** @public
* Configuration options for timeseries collections
* @see https://www.mongodb.com/docs/manual/core/timeseries-collections/
*/
export interface TimeSeriesCollectionOptions extends Document {
timeField: string;
metaField?: string;
granularity?: 'seconds' | 'minutes' | 'hours' | string;
bucketMaxSpanSeconds?: number;
bucketRoundingSeconds?: number;
}
/** @public
* Configuration options for clustered collections
* @see https://www.mongodb.com/docs/manual/core/clustered-collections/
*/
export interface ClusteredCollectionOptions extends Document {
name?: string;
key: Document;
unique: boolean;
}
/** @public */
export interface CreateCollectionOptions extends CommandOperationOptions {
/** Create a capped collection */
capped?: boolean;
/** @deprecated Create an index on the _id field of the document. This option is deprecated in MongoDB 3.2+ and will be removed once no longer supported by the server. */
autoIndexId?: boolean;
/** The size of the capped collection in bytes */
size?: number;
/** The maximum number of documents in the capped collection */
max?: number;
/** Available for the MMAPv1 storage engine only to set the usePowerOf2Sizes and the noPadding flag */
flags?: number;
/** Allows users to specify configuration to the storage engine on a per-collection basis when creating a collection */
storageEngine?: Document;
/** Allows users to specify validation rules or expressions for the collection. For more information, see Document Validation */
validator?: Document;
/** Determines how strictly MongoDB applies the validation rules to existing documents during an update */
validationLevel?: string;
/** Determines whether to error on invalid documents or just warn about the violations but allow invalid documents to be inserted */
validationAction?: string;
/** Allows users to specify a default configuration for indexes when creating a collection */
indexOptionDefaults?: Document;
/** The name of the source collection or view from which to create the view. The name is not the full namespace of the collection or view (i.e., does not include the database name and implies the same database as the view to create) */
viewOn?: string;
/** An array that consists of the aggregation pipeline stage. Creates the view by applying the specified pipeline to the viewOn collection or view */
pipeline?: Document[];
/** A primary key factory function for generation of custom _id keys. */
pkFactory?: PkFactory;
/** A document specifying configuration options for timeseries collections. */
timeseries?: TimeSeriesCollectionOptions;
/** A document specifying configuration options for clustered collections. For MongoDB 5.3 and above. */
clusteredIndex?: ClusteredCollectionOptions;
/** The number of seconds after which a document in a timeseries or clustered collection expires. */
expireAfterSeconds?: number;
/** @experimental */
encryptedFields?: Document;
/**
* If set, enables pre-update and post-update document events to be included for any
* change streams that listen on this collection.
*/
changeStreamPreAndPostImages?: { enabled: boolean };
}
/* @internal */
const INVALID_QE_VERSION =
'Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption.';
/** @internal */
export class CreateCollectionOperation extends CommandOperation<Collection> {
override options: CreateCollectionOptions;
db: Db;
name: string;
constructor(db: Db, name: string, options: CreateCollectionOptions = {}) {
super(db, options);
this.options = options;
this.db = db;
this.name = name;
}
override get commandName() {
return 'create' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<Collection> {
const db = this.db;
const name = this.name;
const options = this.options;
const encryptedFields: Document | undefined =
options.encryptedFields ??
db.client.s.options.autoEncryption?.encryptedFieldsMap?.[`${db.databaseName}.${name}`];
if (encryptedFields) {
      // Creating a QE collection requires a minimum server version of 7.0.0
// TODO(NODE-5353): Get wire version information from connection.
if (
!server.loadBalanced &&
server.description.maxWireVersion < MIN_SUPPORTED_QE_WIRE_VERSION
) {
throw new MongoCompatibilityError(
`${INVALID_QE_VERSION} The minimum server version required is ${MIN_SUPPORTED_QE_SERVER_VERSION}`
);
}
      // Create auxiliary collections for queryable encryption support.
const escCollection = encryptedFields.escCollection ?? `enxcol_.${name}.esc`;
const ecocCollection = encryptedFields.ecocCollection ?? `enxcol_.${name}.ecoc`;
for (const collectionName of [escCollection, ecocCollection]) {
const createOp = new CreateCollectionOperation(db, collectionName, {
clusteredIndex: {
key: { _id: 1 },
unique: true
}
});
await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
}
if (!options.encryptedFields) {
this.options = { ...this.options, encryptedFields };
}
}
const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
if (encryptedFields) {
// Create the required index for queryable encryption support.
const createIndexOp = CreateIndexesOperation.fromIndexSpecification(
db,
name,
{ __safeContent__: 1 },
{}
);
await createIndexOp.execute(server, session, timeoutContext);
}
return coll;
}
private async executeWithoutEncryptedFieldsCheck(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<Collection> {
const db = this.db;
const name = this.name;
const options = this.options;
const cmd: Document = { create: name };
for (const n in options) {
if (
(options as any)[n] != null &&
typeof (options as any)[n] !== 'function' &&
!ILLEGAL_COMMAND_FIELDS.has(n)
) {
cmd[n] = (options as any)[n];
}
}
// otherwise just execute the command
await super.executeCommand(server, session, cmd, timeoutContext);
return new Collection(db, name, options);
}
}
defineAspects(CreateCollectionOperation, [Aspect.WRITE_OPERATION]);
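
Usage sketch (illustrative only): Db#createCollection forwards its options to CreateCollectionOperation, which copies everything except the ILLEGAL_COMMAND_FIELDS above onto the create command. Database and collection names are invented.

import { MongoClient } from 'mongodb';

async function example() {
  const client = new MongoClient('mongodb://localhost:27017');
  const db = client.db('metrics');
  // Time series collection; the timeseries document matches TimeSeriesCollectionOptions above.
  await db.createCollection('sensor_readings', {
    timeseries: { timeField: 'ts', metaField: 'sensorId', granularity: 'minutes' },
    expireAfterSeconds: 60 * 60 * 24
  });
  // Clustered collection (MongoDB 5.3+), matching ClusteredCollectionOptions above.
  await db.createCollection('sessions', {
    clusteredIndex: { key: { _id: 1 }, unique: true },
    expireAfterSeconds: 3600
  });
  await client.close();
}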

View file

@ -0,0 +1,184 @@
import type { Document } from '../bson';
import type { Collection } from '../collection';
import { MongoCompatibilityError, MongoServerError } from '../error';
import { type TODO_NODE_3286 } from '../mongo_types';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { type MongoDBNamespace } from '../utils';
import { type WriteConcernOptions } from '../write_concern';
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects, type Hint } from './operation';
/** @public */
export interface DeleteOptions extends CommandOperationOptions, WriteConcernOptions {
/** If true, when a delete statement fails, don't execute the remaining statements. If false, continue with the remaining statements when one fails. */
ordered?: boolean;
/** Specifies the collation to use for the operation */
collation?: CollationOptions;
/** Specify that the delete query should only consider plans using the hinted index */
hint?: string | Document;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
}
/** @public */
export interface DeleteResult {
/** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined. */
acknowledged: boolean;
/** The number of documents that were deleted */
deletedCount: number;
}
/** @public */
export interface DeleteStatement {
/** The query that matches documents to delete. */
q: Document;
/** The number of matching documents to delete. */
limit: number;
/** Specifies the collation to use for the operation. */
collation?: CollationOptions;
/** A document or string that specifies the index to use to support the query predicate. */
hint?: Hint;
}
/** @internal */
export class DeleteOperation extends CommandOperation<DeleteResult> {
override options: DeleteOptions;
statements: DeleteStatement[];
constructor(ns: MongoDBNamespace, statements: DeleteStatement[], options: DeleteOptions) {
super(undefined, options);
this.options = options;
this.ns = ns;
this.statements = statements;
}
override get commandName() {
return 'delete' as const;
}
override get canRetryWrite(): boolean {
if (super.canRetryWrite === false) {
return false;
}
return this.statements.every(op => (op.limit != null ? op.limit > 0 : true));
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<DeleteResult> {
const options = this.options ?? {};
const ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
const command: Document = {
delete: this.ns.collection,
deletes: this.statements,
ordered
};
if (options.let) {
command.let = options.let;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (options.comment !== undefined) {
command.comment = options.comment;
}
const unacknowledgedWrite = this.writeConcern && this.writeConcern.w === 0;
if (unacknowledgedWrite) {
if (this.statements.find((o: Document) => o.hint)) {
// TODO(NODE-3541): fix error for hint with unacknowledged writes
throw new MongoCompatibilityError(`hint is not supported with unacknowledged writes`);
}
}
const res: TODO_NODE_3286 = await super.executeCommand(
server,
session,
command,
timeoutContext
);
return res;
}
}
export class DeleteOneOperation extends DeleteOperation {
constructor(collection: Collection, filter: Document, options: DeleteOptions) {
super(collection.s.namespace, [makeDeleteStatement(filter, { ...options, limit: 1 })], options);
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<DeleteResult> {
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
if (this.explain) return res;
if (res.code) throw new MongoServerError(res);
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
return {
acknowledged: this.writeConcern?.w !== 0,
deletedCount: res.n
};
}
}
export class DeleteManyOperation extends DeleteOperation {
constructor(collection: Collection, filter: Document, options: DeleteOptions) {
super(collection.s.namespace, [makeDeleteStatement(filter, options)], options);
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<DeleteResult> {
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
if (this.explain) return res;
if (res.code) throw new MongoServerError(res);
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
return {
acknowledged: this.writeConcern?.w !== 0,
deletedCount: res.n
};
}
}
export function makeDeleteStatement(
filter: Document,
options: DeleteOptions & { limit?: number }
): DeleteStatement {
const op: DeleteStatement = {
q: filter,
limit: typeof options.limit === 'number' ? options.limit : 0
};
if (options.collation) {
op.collation = options.collation;
}
if (options.hint) {
op.hint = options.hint;
}
return op;
}
defineAspects(DeleteOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION]);
defineAspects(DeleteOneOperation, [
Aspect.RETRYABLE,
Aspect.WRITE_OPERATION,
Aspect.EXPLAINABLE,
Aspect.SKIP_COLLATION
]);
defineAspects(DeleteManyOperation, [
Aspect.WRITE_OPERATION,
Aspect.EXPLAINABLE,
Aspect.SKIP_COLLATION
]);
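/*
 * Illustrative sketch: the statement shapes the helper above produces. The filter, field and
 * index names are invented. A deleteOne-style call uses limit: 1 (and stays retryable per
 * canRetryWrite); a deleteMany-style call uses limit: 0, meaning "remove every match".
 */
const deleteOneStatementExample: DeleteStatement = makeDeleteStatement(
  { status: 'inactive' },
  { limit: 1, hint: { status: 1 } }
);
const deleteManyStatementExample: DeleteStatement = makeDeleteStatement({ status: 'inactive' }, {});
// deleteOneStatementExample  => { q: { status: 'inactive' }, limit: 1, hint: { status: 1 } }
// deleteManyStatementExample => { q: { status: 'inactive' }, limit: 0 }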

View file

@@ -0,0 +1,103 @@
import type { Document } from '../bson';
import type { Collection } from '../collection';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { decorateWithCollation, decorateWithReadConcern } from '../utils';
import { CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects } from './operation';
/** @public */
export type DistinctOptions = CommandOperationOptions & {
/**
* @sinceServerVersion 7.1
*
* The index to use. Specify either the index name as a string or the index key pattern.
* If specified, then the query system will only consider plans using the hinted index.
*
 * If provided as a string, `hint` must be the name of an index on the collection.
 * If provided as an object, `hint` must be the key pattern of an index defined on the collection.
*
* See https://www.mongodb.com/docs/manual/reference/command/distinct/#command-fields.
*/
hint?: Document | string;
};
/**
* Return a list of distinct values for the given key across a collection.
* @internal
*/
export class DistinctOperation extends CommandOperation<any[]> {
override options: DistinctOptions;
collection: Collection;
/** Field of the document to find distinct values for. */
key: string;
/** The query for filtering the set of documents to which we apply the distinct filter. */
query: Document;
/**
* Construct a Distinct operation.
*
* @param collection - Collection instance.
* @param key - Field of the document to find distinct values for.
* @param query - The query for filtering the set of documents to which we apply the distinct filter.
* @param options - Optional settings. See Collection.prototype.distinct for a list of options.
*/
constructor(collection: Collection, key: string, query: Document, options?: DistinctOptions) {
super(collection, options);
this.options = options ?? {};
this.collection = collection;
this.key = key;
this.query = query;
}
override get commandName() {
return 'distinct' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<any[]> {
const coll = this.collection;
const key = this.key;
const query = this.query;
const options = this.options;
// Distinct command
const cmd: Document = {
distinct: coll.collectionName,
key: key,
query: query
};
// Add maxTimeMS if defined
if (typeof options.maxTimeMS === 'number') {
cmd.maxTimeMS = options.maxTimeMS;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (typeof options.comment !== 'undefined') {
cmd.comment = options.comment;
}
if (options.hint != null) {
cmd.hint = options.hint;
}
// Do we have a readConcern specified
decorateWithReadConcern(cmd, coll, options);
// Have we specified collation
decorateWithCollation(cmd, coll, options);
const result = await super.executeCommand(server, session, cmd, timeoutContext);
return this.explain ? result : result.values;
}
}
defineAspects(DistinctOperation, [Aspect.READ_OPERATION, Aspect.RETRYABLE, Aspect.EXPLAINABLE]);
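/*
 * Illustrative sketch: the command document execute() builds for a typical call. The
 * collection, field and index names are invented; maxTimeMS, comment and hint are attached
 * only when the caller provides them.
 */
const exampleDistinctCommand: Document = {
  distinct: 'orders',
  key: 'status',
  query: { archived: false },
  maxTimeMS: 1000,
  hint: 'status_1' // requires MongoDB 7.1+ per DistinctOptions above
};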

View file

@@ -0,0 +1,116 @@
import type { Document } from '../bson';
import type { Db } from '../db';
import { MONGODB_ERROR_CODES, MongoServerError } from '../error';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects } from './operation';
/** @public */
export interface DropCollectionOptions extends CommandOperationOptions {
/** @experimental */
encryptedFields?: Document;
}
/** @internal */
export class DropCollectionOperation extends CommandOperation<boolean> {
override options: DropCollectionOptions;
db: Db;
name: string;
constructor(db: Db, name: string, options: DropCollectionOptions = {}) {
super(db, options);
this.db = db;
this.options = options;
this.name = name;
}
override get commandName() {
return 'drop' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<boolean> {
const db = this.db;
const options = this.options;
const name = this.name;
const encryptedFieldsMap = db.client.s.options.autoEncryption?.encryptedFieldsMap;
let encryptedFields: Document | undefined =
options.encryptedFields ?? encryptedFieldsMap?.[`${db.databaseName}.${name}`];
if (!encryptedFields && encryptedFieldsMap) {
// If the MongoClient was configured with an encryptedFieldsMap,
// and no encryptedFields config was available in it or explicitly
// passed as an argument, the spec tells us to look one up using
// listCollections().
const listCollectionsResult = await db
.listCollections({ name }, { nameOnly: false })
.toArray();
encryptedFields = listCollectionsResult?.[0]?.options?.encryptedFields;
}
if (encryptedFields) {
const escCollection = encryptedFields.escCollection || `enxcol_.${name}.esc`;
const ecocCollection = encryptedFields.ecocCollection || `enxcol_.${name}.ecoc`;
for (const collectionName of [escCollection, ecocCollection]) {
// Drop auxiliary collections, ignoring potential NamespaceNotFound errors.
const dropOp = new DropCollectionOperation(db, collectionName);
try {
await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
} catch (err) {
if (
!(err instanceof MongoServerError) ||
err.code !== MONGODB_ERROR_CODES.NamespaceNotFound
) {
throw err;
}
}
}
}
return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
}
private async executeWithoutEncryptedFieldsCheck(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<boolean> {
await super.executeCommand(server, session, { drop: this.name }, timeoutContext);
return true;
}
}
/** @public */
export type DropDatabaseOptions = CommandOperationOptions;
/** @internal */
export class DropDatabaseOperation extends CommandOperation<boolean> {
override options: DropDatabaseOptions;
constructor(db: Db, options: DropDatabaseOptions) {
super(db, options);
this.options = options;
}
override get commandName() {
return 'dropDatabase' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<boolean> {
await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext);
return true;
}
}
defineAspects(DropCollectionOperation, [Aspect.WRITE_OPERATION]);
defineAspects(DropDatabaseOperation, [Aspect.WRITE_OPERATION]);
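/*
 * Illustrative usage sketch: assumes a connected `Db`; the collection name is invented. When
 * Queryable Encryption is configured for the collection, execute() above also drops the derived
 * enxcol_.<name>.esc and enxcol_.<name>.ecoc collections before dropping the collection itself.
 */
async function dropSketch(db: Db): Promise<void> {
  const dropped = await db.dropCollection('patients');
  console.log(dropped); // true once the drop command succeeds
  await db.dropDatabase();
}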

View file

@@ -0,0 +1,61 @@
import type { Document } from '../bson';
import type { Collection } from '../collection';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects } from './operation';
/** @public */
export interface EstimatedDocumentCountOptions extends CommandOperationOptions {
/**
* The maximum amount of time to allow the operation to run.
*
* This option is sent only if the caller explicitly provides a value. The default is to not send a value.
*/
maxTimeMS?: number;
}
/** @internal */
export class EstimatedDocumentCountOperation extends CommandOperation<number> {
override options: EstimatedDocumentCountOptions;
collectionName: string;
constructor(collection: Collection, options: EstimatedDocumentCountOptions = {}) {
super(collection, options);
this.options = options;
this.collectionName = collection.collectionName;
}
override get commandName() {
return 'count' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<number> {
const cmd: Document = { count: this.collectionName };
if (typeof this.options.maxTimeMS === 'number') {
cmd.maxTimeMS = this.options.maxTimeMS;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (this.options.comment !== undefined) {
cmd.comment = this.options.comment;
}
const response = await super.executeCommand(server, session, cmd, timeoutContext);
return response?.n || 0;
}
}
defineAspects(EstimatedDocumentCountOperation, [
Aspect.READ_OPERATION,
Aspect.RETRYABLE,
Aspect.CURSOR_CREATING
]);
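/*
 * Illustrative usage sketch: assumes a connected `Collection`. The operation sends
 * { count: <collection> } with no filter, so the server answers from collection metadata
 * rather than scanning documents.
 */
async function estimatedCountSketch(orders: Collection): Promise<void> {
  const approximateTotal = await orders.estimatedDocumentCount({ maxTimeMS: 500 });
  console.log(`~${approximateTotal} documents`);
}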

View file

@@ -0,0 +1,304 @@
import {
isRetryableReadError,
isRetryableWriteError,
MongoCompatibilityError,
MONGODB_ERROR_CODES,
MongoError,
MongoErrorLabel,
MongoExpiredSessionError,
MongoInvalidArgumentError,
MongoNetworkError,
MongoNotConnectedError,
MongoRuntimeError,
MongoServerError,
MongoTransactionError,
MongoUnexpectedServerResponseError
} from '../error';
import type { MongoClient } from '../mongo_client';
import { ReadPreference } from '../read_preference';
import type { ServerDescription } from '../sdam/server_description';
import {
sameServerSelector,
secondaryWritableServerSelector,
type ServerSelector
} from '../sdam/server_selection';
import type { Topology } from '../sdam/topology';
import type { ClientSession } from '../sessions';
import { TimeoutContext } from '../timeout';
import { abortable, supportsRetryableWrites } from '../utils';
import { AbstractOperation, Aspect } from './operation';
const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation;
const MMAPv1_RETRY_WRITES_ERROR_MESSAGE =
'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.';
type ResultTypeFromOperation<TOperation> =
TOperation extends AbstractOperation<infer K> ? K : never;
/**
* Executes the given operation with provided arguments.
* @internal
*
* @remarks
* Allows for a single point of entry to provide features such as implicit sessions, which
* are required by the Driver Sessions specification in the event that a ClientSession is
* not provided.
*
* The expectation is that this function:
* - Connects the MongoClient if it has not already been connected, see {@link autoConnect}
* - Creates a session if none is provided and cleans up the session it creates
* - Tries an operation and retries under certain conditions, see {@link tryOperation}
*
* @typeParam T - The operation's type
* @typeParam TResult - The type of the operation's result, calculated from T
*
* @param client - The MongoClient to execute this operation with
* @param operation - The operation to execute
*/
export async function executeOperation<
T extends AbstractOperation<TResult>,
TResult = ResultTypeFromOperation<T>
>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise<TResult> {
if (!(operation instanceof AbstractOperation)) {
// TODO(NODE-3483): Extend MongoRuntimeError
throw new MongoRuntimeError('This method requires a valid operation instance');
}
const topology =
client.topology == null
? await abortable(autoConnect(client), operation.options)
: client.topology;
// The driver sessions spec mandates that we implicitly create sessions for operations
// that are not explicitly provided with a session.
let session = operation.session;
let owner: symbol | undefined;
if (session == null) {
owner = Symbol();
session = client.startSession({ owner, explicit: false });
} else if (session.hasEnded) {
throw new MongoExpiredSessionError('Use of expired sessions is not permitted');
} else if (session.snapshotEnabled && !topology.capabilities.supportsSnapshotReads) {
throw new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later');
} else if (session.client !== client) {
throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient');
}
const readPreference = operation.readPreference ?? ReadPreference.primary;
const inTransaction = !!session?.inTransaction();
const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);
if (
inTransaction &&
!readPreference.equals(ReadPreference.primary) &&
(hasReadAspect || operation.commandName === 'runCommand')
) {
throw new MongoTransactionError(
`Read preference in a transaction must be primary, not: ${readPreference.mode}`
);
}
if (session?.isPinned && session.transaction.isCommitted && !operation.bypassPinningCheck) {
session.unpin();
}
timeoutContext ??= TimeoutContext.create({
session,
serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS,
waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS,
timeoutMS: operation.options.timeoutMS
});
try {
return await tryOperation(operation, {
topology,
timeoutContext,
session,
readPreference
});
} finally {
if (session?.owner != null && session.owner === owner) {
await session.endSession();
}
}
}
/**
* Connects a client if it has not yet been connected
* @internal
*/
async function autoConnect(client: MongoClient): Promise<Topology> {
if (client.topology == null) {
if (client.s.hasBeenClosed) {
throw new MongoNotConnectedError('Client must be connected before running operations');
}
client.s.options.__skipPingOnConnect = true;
try {
await client.connect();
if (client.topology == null) {
throw new MongoRuntimeError(
'client.connect did not create a topology but also did not throw'
);
}
return client.topology;
} finally {
delete client.s.options.__skipPingOnConnect;
}
}
return client.topology;
}
/** @internal */
type RetryOptions = {
session: ClientSession | undefined;
readPreference: ReadPreference;
topology: Topology;
timeoutContext: TimeoutContext;
};
/**
* Executes an operation and retries as appropriate
* @internal
*
* @remarks
* Implements behaviour described in [Retryable Reads](https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.md) and [Retryable
* Writes](https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md) specification
*
* This function:
* - performs initial server selection
* - attempts to execute an operation
* - retries the operation if it meets the criteria for a retryable read or a retryable write
*
* @typeParam T - The operation's type
* @typeParam TResult - The type of the operation's result, calculated from T
*
* @param operation - The operation to execute
* */
async function tryOperation<
T extends AbstractOperation<TResult>,
TResult = ResultTypeFromOperation<T>
>(
operation: T,
{ topology, timeoutContext, session, readPreference }: RetryOptions
): Promise<TResult> {
let selector: ReadPreference | ServerSelector;
if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) {
// GetMore and KillCursor operations must always select the same server, but run through
// server selection to potentially force monitor checks if the server is
// in an unknown state.
selector = sameServerSelector(operation.server?.description);
} else if (operation.trySecondaryWrite) {
// If operation should try to write to secondary use the custom server selector
// otherwise provide the read preference.
selector = secondaryWritableServerSelector(topology.commonWireVersion, readPreference);
} else {
selector = readPreference;
}
let server = await topology.selectServer(selector, {
session,
operationName: operation.commandName,
timeoutContext,
signal: operation.options.signal
});
const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);
const hasWriteAspect = operation.hasAspect(Aspect.WRITE_OPERATION);
const inTransaction = session?.inTransaction() ?? false;
const willRetryRead = topology.s.options.retryReads && !inTransaction && operation.canRetryRead;
const willRetryWrite =
topology.s.options.retryWrites &&
!inTransaction &&
supportsRetryableWrites(server) &&
operation.canRetryWrite;
const willRetry =
operation.hasAspect(Aspect.RETRYABLE) &&
session != null &&
((hasReadAspect && willRetryRead) || (hasWriteAspect && willRetryWrite));
if (hasWriteAspect && willRetryWrite && session != null) {
operation.options.willRetryWrite = true;
session.incrementTransactionNumber();
}
const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1;
let previousOperationError: MongoError | undefined;
let previousServer: ServerDescription | undefined;
for (let tries = 0; tries < maxTries; tries++) {
if (previousOperationError) {
if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) {
throw new MongoServerError({
message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
originalError: previousOperationError
});
}
if (operation.hasAspect(Aspect.COMMAND_BATCHING) && !operation.canRetryWrite) {
throw previousOperationError;
}
if (hasWriteAspect && !isRetryableWriteError(previousOperationError))
throw previousOperationError;
if (hasReadAspect && !isRetryableReadError(previousOperationError))
throw previousOperationError;
if (
previousOperationError instanceof MongoNetworkError &&
operation.hasAspect(Aspect.CURSOR_CREATING) &&
session != null &&
session.isPinned &&
!session.inTransaction()
) {
session.unpin({ force: true, forceClear: true });
}
server = await topology.selectServer(selector, {
session,
operationName: operation.commandName,
previousServer,
signal: operation.options.signal
});
if (hasWriteAspect && !supportsRetryableWrites(server)) {
throw new MongoUnexpectedServerResponseError(
'Selected server does not support retryable writes'
);
}
}
try {
// If tries > 0 and we are command batching we need to reset the batch.
if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) {
operation.resetBatch();
}
return await operation.execute(server, session, timeoutContext);
} catch (operationError) {
if (!(operationError instanceof MongoError)) throw operationError;
if (
previousOperationError != null &&
operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed)
) {
throw previousOperationError;
}
previousServer = server.description;
previousOperationError = operationError;
// Reset timeouts
timeoutContext.clear();
}
}
throw (
previousOperationError ??
new MongoRuntimeError('Tried to propagate retryability error, but no error was found.')
);
}
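/*
 * Illustrative sketch of the attempt budget tryOperation() uses (see maxTries above). With
 * CSOT enabled (a timeoutMS is in play) a retryable operation may be retried until the timeout
 * expires; otherwise a retryable operation gets exactly one retry, and everything else gets a
 * single attempt.
 */
function maxTriesSketch(willRetry: boolean, csotEnabled: boolean): number {
  return willRetry ? (csotEnabled ? Infinity : 2) : 1;
}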

View file

@@ -0,0 +1,279 @@
import type { Document } from '../bson';
import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor';
import { MongoInvalidArgumentError } from '../error';
import {
decorateWithExplain,
type ExplainOptions,
validateExplainTimeoutOptions
} from '../explain';
import { ReadConcern } from '../read_concern';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { formatSort, type Sort } from '../sort';
import { type TimeoutContext } from '../timeout';
import { type MongoDBNamespace, normalizeHintField } from '../utils';
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects, type Hint } from './operation';
/**
* @public
* @typeParam TSchema - Unused schema definition, deprecated usage, only specify `FindOptions` with no generic
*/
// eslint-disable-next-line @typescript-eslint/no-unused-vars
export interface FindOptions<TSchema extends Document = Document>
extends Omit<CommandOperationOptions, 'writeConcern' | 'explain'>,
AbstractCursorOptions {
/** Sets the limit of documents returned in the query. */
limit?: number;
/** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */
sort?: Sort;
/** The fields to return in the query. Object of fields to either include or exclude (one of, not both), `{'a':1, 'b': 1}` **or** `{'a': 0, 'b': 0}` */
projection?: Document;
/** Set to skip N documents ahead in your query (useful for pagination). */
skip?: number;
/** Tell the query to use specific indexes in the query. Object of indexes to use, `{'_id':1}` */
hint?: Hint;
/** Specify if the cursor can timeout. */
timeout?: boolean;
/** Specify if the cursor is tailable. */
tailable?: boolean;
/** Specify if the cursor is a tailable-await cursor. Requires `tailable` to be true */
awaitData?: boolean;
/** Set the batchSize for the getMoreCommand when iterating over the query results. */
batchSize?: number;
/** If true, returns only the index keys in the resulting documents. */
returnKey?: boolean;
/** The inclusive lower bound for a specific index */
min?: Document;
/** The exclusive upper bound for a specific index */
max?: Document;
/** Number of milliseconds to wait before aborting the query. */
maxTimeMS?: number;
/** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. Requires `tailable` and `awaitData` to be true */
maxAwaitTimeMS?: number;
/** The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. Set this option to prevent that. */
noCursorTimeout?: boolean;
/** Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields). */
collation?: CollationOptions;
/** Allows disk use for blocking sort operations exceeding 100MB memory. (MongoDB 3.2 or higher) */
allowDiskUse?: boolean;
/** Determines whether to close the cursor after the first batch. Defaults to false. */
singleBatch?: boolean;
/** For queries against a sharded collection, allows the command (or subsequent getMore commands) to return partial results, rather than an error, if one or more queried shards are unavailable. */
allowPartialResults?: boolean;
/** Determines whether to return the record identifier for each document. If true, adds a field $recordId to the returned documents. */
showRecordId?: boolean;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
/**
* Option to enable an optimized code path for queries looking for a particular range of `ts` values in the oplog. Requires `tailable` to be true.
* @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored.
*/
oplogReplay?: boolean;
/**
* Specifies the verbosity mode for the explain output.
* @deprecated This API is deprecated in favor of `collection.find().explain()`.
*/
explain?: ExplainOptions['explain'];
/** @internal*/
timeoutMode?: CursorTimeoutMode;
}
/** @internal */
export class FindOperation extends CommandOperation<CursorResponse> {
/**
* @remarks WriteConcern can still be present on the options because
* we inherit options from the client/db/collection. The
* key must be present on the options in order to delete it.
* This allows typescript to delete the key but will
* not allow a writeConcern to be assigned as a property on options.
*/
override options: FindOptions & { writeConcern?: never };
filter: Document;
constructor(ns: MongoDBNamespace, filter: Document = {}, options: FindOptions = {}) {
super(undefined, options);
this.options = { ...options };
delete this.options.writeConcern;
this.ns = ns;
if (typeof filter !== 'object' || Array.isArray(filter)) {
throw new MongoInvalidArgumentError('Query filter must be a plain object or ObjectId');
}
// special case passing in an ObjectId as a filter
this.filter = filter != null && filter._bsontype === 'ObjectId' ? { _id: filter } : filter;
}
override get commandName() {
return 'find' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<CursorResponse> {
this.server = server;
const options = this.options;
let findCommand = makeFindCommand(this.ns, this.filter, options);
if (this.explain) {
validateExplainTimeoutOptions(this.options, this.explain);
findCommand = decorateWithExplain(findCommand, this.explain);
}
return await server.command(
this.ns,
findCommand,
{
...this.options,
...this.bsonOptions,
documentsReturnedIn: 'firstBatch',
session,
timeoutContext
},
this.explain ? ExplainedCursorResponse : CursorResponse
);
}
}
function makeFindCommand(ns: MongoDBNamespace, filter: Document, options: FindOptions): Document {
const findCommand: Document = {
find: ns.collection,
filter
};
if (options.sort) {
findCommand.sort = formatSort(options.sort);
}
if (options.projection) {
let projection = options.projection;
if (projection && Array.isArray(projection)) {
projection = projection.length
? projection.reduce((result, field) => {
result[field] = 1;
return result;
}, {})
: { _id: 1 };
}
findCommand.projection = projection;
}
if (options.hint) {
findCommand.hint = normalizeHintField(options.hint);
}
if (typeof options.skip === 'number') {
findCommand.skip = options.skip;
}
if (typeof options.limit === 'number') {
if (options.limit < 0) {
findCommand.limit = -options.limit;
findCommand.singleBatch = true;
} else {
findCommand.limit = options.limit;
}
}
if (typeof options.batchSize === 'number') {
if (options.batchSize < 0) {
if (
options.limit &&
options.limit !== 0 &&
Math.abs(options.batchSize) < Math.abs(options.limit)
) {
findCommand.limit = -options.batchSize;
}
findCommand.singleBatch = true;
} else {
findCommand.batchSize = options.batchSize;
}
}
if (typeof options.singleBatch === 'boolean') {
findCommand.singleBatch = options.singleBatch;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (options.comment !== undefined) {
findCommand.comment = options.comment;
}
if (typeof options.maxTimeMS === 'number') {
findCommand.maxTimeMS = options.maxTimeMS;
}
const readConcern = ReadConcern.fromOptions(options);
if (readConcern) {
findCommand.readConcern = readConcern.toJSON();
}
if (options.max) {
findCommand.max = options.max;
}
if (options.min) {
findCommand.min = options.min;
}
if (typeof options.returnKey === 'boolean') {
findCommand.returnKey = options.returnKey;
}
if (typeof options.showRecordId === 'boolean') {
findCommand.showRecordId = options.showRecordId;
}
if (typeof options.tailable === 'boolean') {
findCommand.tailable = options.tailable;
}
if (typeof options.oplogReplay === 'boolean') {
findCommand.oplogReplay = options.oplogReplay;
}
if (typeof options.timeout === 'boolean') {
findCommand.noCursorTimeout = !options.timeout;
} else if (typeof options.noCursorTimeout === 'boolean') {
findCommand.noCursorTimeout = options.noCursorTimeout;
}
if (typeof options.awaitData === 'boolean') {
findCommand.awaitData = options.awaitData;
}
if (typeof options.allowPartialResults === 'boolean') {
findCommand.allowPartialResults = options.allowPartialResults;
}
if (options.collation) {
findCommand.collation = options.collation;
}
if (typeof options.allowDiskUse === 'boolean') {
findCommand.allowDiskUse = options.allowDiskUse;
}
if (options.let) {
findCommand.let = options.let;
}
return findCommand;
}
defineAspects(FindOperation, [
Aspect.READ_OPERATION,
Aspect.RETRYABLE,
Aspect.EXPLAINABLE,
Aspect.CURSOR_CREATING
]);
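/*
 * Illustrative sketch: roughly what makeFindCommand produces for a paginated query against an
 * invented 'users' collection. Note how a caller-provided limit of -10 becomes limit: 10 plus
 * singleBatch: true; the sort is shown as a plain document for readability, while formatSort
 * normalizes the caller's Sort value before it is sent.
 */
const exampleFindCommandShape: Document = {
  find: 'users',
  filter: { age: { $gte: 18 } },
  sort: { age: -1 },
  projection: { name: 1 },
  skip: 20,
  limit: 10,
  singleBatch: true
};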

View file

@@ -0,0 +1,294 @@
import type { Document } from '../bson';
import type { Collection } from '../collection';
import { MongoCompatibilityError, MongoInvalidArgumentError } from '../error';
import { ReadPreference } from '../read_preference';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { formatSort, type Sort, type SortForCmd } from '../sort';
import { type TimeoutContext } from '../timeout';
import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils';
import { type WriteConcern, type WriteConcernSettings } from '../write_concern';
import { CommandOperation, type CommandOperationOptions } from './command';
import { Aspect, defineAspects } from './operation';
/** @public */
export const ReturnDocument = Object.freeze({
BEFORE: 'before',
AFTER: 'after'
} as const);
/** @public */
export type ReturnDocument = (typeof ReturnDocument)[keyof typeof ReturnDocument];
/** @public */
export interface FindOneAndDeleteOptions extends CommandOperationOptions {
/** An optional hint for query optimization. See the {@link https://www.mongodb.com/docs/manual/reference/command/update/#update-command-hint|update command} reference for more information.*/
hint?: Document;
/** Limits the fields to return for all matching documents. */
projection?: Document;
/** Determines which document the operation modifies if the query selects multiple documents. */
sort?: Sort;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
/**
* Return the ModifyResult instead of the modified document. Defaults to false
*/
includeResultMetadata?: boolean;
}
/** @public */
export interface FindOneAndReplaceOptions extends CommandOperationOptions {
/** Allow driver to bypass schema validation. */
bypassDocumentValidation?: boolean;
/** An optional hint for query optimization. See the {@link https://www.mongodb.com/docs/manual/reference/command/update/#update-command-hint|update command} reference for more information.*/
hint?: Document;
/** Limits the fields to return for all matching documents. */
projection?: Document;
/** When set to 'after', returns the updated document rather than the original. The default is 'before'. */
returnDocument?: ReturnDocument;
/** Determines which document the operation modifies if the query selects multiple documents. */
sort?: Sort;
/** Upsert the document if it does not exist. */
upsert?: boolean;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
/**
* Return the ModifyResult instead of the modified document. Defaults to false
*/
includeResultMetadata?: boolean;
}
/** @public */
export interface FindOneAndUpdateOptions extends CommandOperationOptions {
/** Optional list of array filters referenced in filtered positional operators */
arrayFilters?: Document[];
/** Allow driver to bypass schema validation. */
bypassDocumentValidation?: boolean;
/** An optional hint for query optimization. See the {@link https://www.mongodb.com/docs/manual/reference/command/update/#update-command-hint|update command} reference for more information.*/
hint?: Document;
/** Limits the fields to return for all matching documents. */
projection?: Document;
/** When set to 'after', returns the updated document rather than the original. The default is 'before'. */
returnDocument?: ReturnDocument;
/** Determines which document the operation modifies if the query selects multiple documents. */
sort?: Sort;
/** Upsert the document if it does not exist. */
upsert?: boolean;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
/**
* Return the ModifyResult instead of the modified document. Defaults to false
*/
includeResultMetadata?: boolean;
}
/** @internal */
interface FindAndModifyCmdBase {
remove: boolean;
new: boolean;
upsert: boolean;
update?: Document;
sort?: SortForCmd;
fields?: Document;
bypassDocumentValidation?: boolean;
arrayFilters?: Document[];
maxTimeMS?: number;
let?: Document;
writeConcern?: WriteConcern | WriteConcernSettings;
/**
* Comment to apply to the operation.
*
 * In server versions pre-4.4, 'comment' must be a string. A server
* error will be thrown if any other type is provided.
*
* In server versions 4.4 and above, 'comment' can be any valid BSON type.
*/
comment?: unknown;
}
function configureFindAndModifyCmdBaseUpdateOpts(
cmdBase: FindAndModifyCmdBase,
options: FindOneAndReplaceOptions | FindOneAndUpdateOptions
): FindAndModifyCmdBase {
cmdBase.new = options.returnDocument === ReturnDocument.AFTER;
cmdBase.upsert = options.upsert === true;
if (options.bypassDocumentValidation === true) {
cmdBase.bypassDocumentValidation = options.bypassDocumentValidation;
}
return cmdBase;
}
/** @internal */
export class FindAndModifyOperation extends CommandOperation<Document> {
override options: FindOneAndReplaceOptions | FindOneAndUpdateOptions | FindOneAndDeleteOptions;
cmdBase: FindAndModifyCmdBase;
collection: Collection;
query: Document;
doc?: Document;
constructor(
collection: Collection,
query: Document,
options: FindOneAndReplaceOptions | FindOneAndUpdateOptions | FindOneAndDeleteOptions
) {
super(collection, options);
this.options = options ?? {};
this.cmdBase = {
remove: false,
new: false,
upsert: false
};
options.includeResultMetadata ??= false;
const sort = formatSort(options.sort);
if (sort) {
this.cmdBase.sort = sort;
}
if (options.projection) {
this.cmdBase.fields = options.projection;
}
if (options.maxTimeMS) {
this.cmdBase.maxTimeMS = options.maxTimeMS;
}
// Decorate the findAndModify command with the write Concern
if (options.writeConcern) {
this.cmdBase.writeConcern = options.writeConcern;
}
if (options.let) {
this.cmdBase.let = options.let;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (options.comment !== undefined) {
this.cmdBase.comment = options.comment;
}
// force primary read preference
this.readPreference = ReadPreference.primary;
this.collection = collection;
this.query = query;
}
override get commandName() {
return 'findAndModify' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<Document> {
const coll = this.collection;
const query = this.query;
const options = { ...this.options, ...this.bsonOptions };
// Create findAndModify command object
const cmd: Document = {
findAndModify: coll.collectionName,
query: query,
...this.cmdBase
};
decorateWithCollation(cmd, coll, options);
if (options.hint) {
// TODO: once this method becomes a CommandOperation we will have the server
// in place to check.
const unacknowledgedWrite = this.writeConcern?.w === 0;
if (unacknowledgedWrite || maxWireVersion(server) < 8) {
throw new MongoCompatibilityError(
'The current topology does not support a hint on findAndModify commands'
);
}
cmd.hint = options.hint;
}
// Execute the command
const result = await super.executeCommand(server, session, cmd, timeoutContext);
return options.includeResultMetadata ? result : (result.value ?? null);
}
}
/** @internal */
export class FindOneAndDeleteOperation extends FindAndModifyOperation {
constructor(collection: Collection, filter: Document, options: FindOneAndDeleteOptions) {
// Basic validation
if (filter == null || typeof filter !== 'object') {
throw new MongoInvalidArgumentError('Argument "filter" must be an object');
}
super(collection, filter, options);
this.cmdBase.remove = true;
}
}
/** @internal */
export class FindOneAndReplaceOperation extends FindAndModifyOperation {
constructor(
collection: Collection,
filter: Document,
replacement: Document,
options: FindOneAndReplaceOptions
) {
if (filter == null || typeof filter !== 'object') {
throw new MongoInvalidArgumentError('Argument "filter" must be an object');
}
if (replacement == null || typeof replacement !== 'object') {
throw new MongoInvalidArgumentError('Argument "replacement" must be an object');
}
if (hasAtomicOperators(replacement)) {
throw new MongoInvalidArgumentError('Replacement document must not contain atomic operators');
}
super(collection, filter, options);
this.cmdBase.update = replacement;
configureFindAndModifyCmdBaseUpdateOpts(this.cmdBase, options);
}
}
/** @internal */
export class FindOneAndUpdateOperation extends FindAndModifyOperation {
constructor(
collection: Collection,
filter: Document,
update: Document,
options: FindOneAndUpdateOptions
) {
if (filter == null || typeof filter !== 'object') {
throw new MongoInvalidArgumentError('Argument "filter" must be an object');
}
if (update == null || typeof update !== 'object') {
throw new MongoInvalidArgumentError('Argument "update" must be an object');
}
if (!hasAtomicOperators(update)) {
throw new MongoInvalidArgumentError('Update document requires atomic operators');
}
super(collection, filter, options);
this.cmdBase.update = update;
configureFindAndModifyCmdBaseUpdateOpts(this.cmdBase, options);
if (options.arrayFilters) {
this.cmdBase.arrayFilters = options.arrayFilters;
}
}
}
defineAspects(FindAndModifyOperation, [
Aspect.WRITE_OPERATION,
Aspect.RETRYABLE,
Aspect.EXPLAINABLE
]);
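/*
 * Illustrative usage sketch: assumes a connected `Collection`; the collection and field names
 * are invented. Because includeResultMetadata defaults to false, the call resolves to the
 * matched document (or null), and returnDocument: 'after' selects the post-update version.
 */
async function claimNextJobSketch(jobs: Collection): Promise<void> {
  const claimed = await jobs.findOneAndUpdate(
    { status: 'queued' },
    { $set: { status: 'running', startedAt: new Date() } },
    { sort: { priority: -1 }, returnDocument: ReturnDocument.AFTER }
  );
  console.log(claimed?._id ?? 'nothing to claim');
}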

View file

@@ -0,0 +1,110 @@
import type { Long } from '../bson';
import { CursorResponse } from '../cmap/wire_protocol/responses';
import { MongoRuntimeError } from '../error';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { maxWireVersion, type MongoDBNamespace } from '../utils';
import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation';
/** @internal */
export interface GetMoreOptions extends OperationOptions {
/** Set the batchSize for the getMoreCommand when iterating over the query results. */
batchSize?: number;
/**
* Comment to apply to the operation.
*
* getMore only supports 'comment' in server versions 4.4 and above.
*/
comment?: unknown;
/** Number of milliseconds to wait before aborting the query. */
maxTimeMS?: number;
/** TODO(NODE-4413): Address bug with maxAwaitTimeMS not being passed in from the cursor correctly */
maxAwaitTimeMS?: number;
}
/**
* GetMore command: https://www.mongodb.com/docs/manual/reference/command/getMore/
* @internal
*/
export interface GetMoreCommand {
getMore: Long;
collection: string;
batchSize?: number;
maxTimeMS?: number;
/** Only supported on wire versions 10 or greater */
comment?: unknown;
}
/** @internal */
export class GetMoreOperation extends AbstractOperation {
cursorId: Long;
override options: GetMoreOptions;
constructor(ns: MongoDBNamespace, cursorId: Long, server: Server, options: GetMoreOptions) {
super(options);
this.options = options;
this.ns = ns;
this.cursorId = cursorId;
this.server = server;
}
override get commandName() {
return 'getMore' as const;
}
/**
* Although there is a server already associated with the get more operation, the signature
* for execute passes a server so we will just use that one.
*/
override async execute(
server: Server,
_session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<CursorResponse> {
if (server !== this.server) {
throw new MongoRuntimeError('GetMore must run on the same server the operation began on');
}
if (this.cursorId == null || this.cursorId.isZero()) {
throw new MongoRuntimeError('Unable to iterate cursor with no id');
}
const collection = this.ns.collection;
if (collection == null) {
// Cursors should have adopted the namespace returned by MongoDB
// which should always define a collection name (even a pseudo one, e.g. db.aggregate())
throw new MongoRuntimeError('A collection name must be determined before getMore');
}
const getMoreCmd: GetMoreCommand = {
getMore: this.cursorId,
collection
};
if (typeof this.options.batchSize === 'number') {
getMoreCmd.batchSize = Math.abs(this.options.batchSize);
}
if (typeof this.options.maxAwaitTimeMS === 'number') {
getMoreCmd.maxTimeMS = this.options.maxAwaitTimeMS;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (this.options.comment !== undefined && maxWireVersion(server) >= 9) {
getMoreCmd.comment = this.options.comment;
}
const commandOptions = {
returnFieldSelector: null,
documentsReturnedIn: 'nextBatch',
timeoutContext,
...this.options
};
return await server.command(this.ns, getMoreCmd, commandOptions, CursorResponse);
}
}
defineAspects(GetMoreOperation, [Aspect.READ_OPERATION, Aspect.MUST_SELECT_SAME_SERVER]);
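/*
 * Illustrative sketch of the command shape execute() sends. batchSize is always forwarded as a
 * non-negative value, and the cursor-level maxAwaitTimeMS is mapped onto the command's
 * maxTimeMS (meaningful for tailable awaitData cursors). The function name is invented.
 */
function buildGetMoreCommandSketch(
  cursorId: Long,
  collection: string,
  options: GetMoreOptions
): GetMoreCommand {
  const cmd: GetMoreCommand = { getMore: cursorId, collection };
  if (typeof options.batchSize === 'number') cmd.batchSize = Math.abs(options.batchSize);
  if (typeof options.maxAwaitTimeMS === 'number') cmd.maxTimeMS = options.maxAwaitTimeMS;
  return cmd;
}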

View file

@@ -0,0 +1,419 @@
import type { Document } from '../bson';
import { CursorResponse } from '../cmap/wire_protocol/responses';
import type { Collection } from '../collection';
import { type AbstractCursorOptions } from '../cursor/abstract_cursor';
import { MongoCompatibilityError } from '../error';
import { type OneOrMore } from '../mongo_types';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils';
import {
type CollationOptions,
CommandOperation,
type CommandOperationOptions,
type OperationParent
} from './command';
import { Aspect, defineAspects } from './operation';
const VALID_INDEX_OPTIONS = new Set([
'background',
'unique',
'name',
'partialFilterExpression',
'sparse',
'hidden',
'expireAfterSeconds',
'storageEngine',
'collation',
'version',
// text indexes
'weights',
'default_language',
'language_override',
'textIndexVersion',
// 2d-sphere indexes
'2dsphereIndexVersion',
// 2d indexes
'bits',
'min',
'max',
// geoHaystack Indexes
'bucketSize',
// wildcard indexes
'wildcardProjection'
]);
/** @public */
export type IndexDirection =
| -1
| 1
| '2d'
| '2dsphere'
| 'text'
| 'geoHaystack'
| 'hashed'
| number;
function isIndexDirection(x: unknown): x is IndexDirection {
return (
typeof x === 'number' || x === '2d' || x === '2dsphere' || x === 'text' || x === 'geoHaystack'
);
}
/** @public */
export type IndexSpecification = OneOrMore<
| string
| [string, IndexDirection]
| { [key: string]: IndexDirection }
| Map<string, IndexDirection>
>;
/** @public */
export interface IndexInformationOptions extends ListIndexesOptions {
/**
* When `true`, an array of index descriptions is returned.
 * When `false`, the driver returns an object with keys corresponding to index names and values
* corresponding to the entries of the indexes' key.
*
 * For example, given the following indexes:
* ```
* [ { name: 'a_1', key: { a: 1 } }, { name: 'b_1_c_1' , key: { b: 1, c: 1 } }]
* ```
*
* When `full` is `true`, the above array is returned. When `full` is `false`, the following is returned:
* ```
* {
* 'a_1': [['a', 1]],
* 'b_1_c_1': [['b', 1], ['c', 1]],
* }
* ```
*/
full?: boolean;
}
/** @public */
export interface IndexDescription
extends Pick<
CreateIndexesOptions,
| 'background'
| 'unique'
| 'partialFilterExpression'
| 'sparse'
| 'hidden'
| 'expireAfterSeconds'
| 'storageEngine'
| 'version'
| 'weights'
| 'default_language'
| 'language_override'
| 'textIndexVersion'
| '2dsphereIndexVersion'
| 'bits'
| 'min'
| 'max'
| 'bucketSize'
| 'wildcardProjection'
> {
collation?: CollationOptions;
name?: string;
key: { [key: string]: IndexDirection } | Map<string, IndexDirection>;
}
/** @public */
export interface CreateIndexesOptions extends Omit<CommandOperationOptions, 'writeConcern'> {
/** Creates the index in the background, yielding whenever possible. */
background?: boolean;
/** Creates a unique index. */
unique?: boolean;
/** Override the autogenerated index name (useful if the resulting name is larger than 128 bytes) */
name?: string;
/** Creates a partial index based on the given filter object (MongoDB 3.2 or higher) */
partialFilterExpression?: Document;
/** Creates a sparse index. */
sparse?: boolean;
/** Allows you to expire data on indexes applied to a date field (MongoDB 2.2 or higher) */
expireAfterSeconds?: number;
/** Allows users to configure the storage engine on a per-index basis when creating an index. (MongoDB 3.0 or higher) */
storageEngine?: Document;
/** (MongoDB 4.4. or higher) Specifies how many data-bearing members of a replica set, including the primary, must complete the index builds successfully before the primary marks the indexes as ready. This option accepts the same values for the "w" field in a write concern plus "votingMembers", which indicates all voting data-bearing nodes. */
commitQuorum?: number | string;
/** Specifies the index version number, either 0 or 1. */
version?: number;
// text indexes
weights?: Document;
default_language?: string;
language_override?: string;
textIndexVersion?: number;
// 2d-sphere indexes
'2dsphereIndexVersion'?: number;
// 2d indexes
bits?: number;
/** For geospatial indexes, set the lower bound for the coordinates. */
min?: number;
/** For geospatial indexes, set the upper bound for the coordinates. */
max?: number;
// geoHaystack Indexes
bucketSize?: number;
// wildcard indexes
wildcardProjection?: Document;
/** Specifies that the index should exist on the target collection but should not be used by the query planner when executing operations. (MongoDB 4.4 or higher) */
hidden?: boolean;
}
function isSingleIndexTuple(t: unknown): t is [string, IndexDirection] {
return Array.isArray(t) && t.length === 2 && isIndexDirection(t[1]);
}
/**
* Converts an `IndexSpecification`, which can be specified in multiple formats, into a
* valid `key` for the createIndexes command.
*/
function constructIndexDescriptionMap(indexSpec: IndexSpecification): Map<string, IndexDirection> {
const key: Map<string, IndexDirection> = new Map();
const indexSpecs =
!Array.isArray(indexSpec) || isSingleIndexTuple(indexSpec) ? [indexSpec] : indexSpec;
// Iterate through array and handle different types
for (const spec of indexSpecs) {
if (typeof spec === 'string') {
key.set(spec, 1);
} else if (Array.isArray(spec)) {
key.set(spec[0], spec[1] ?? 1);
} else if (spec instanceof Map) {
for (const [property, value] of spec) {
key.set(property, value);
}
} else if (isObject(spec)) {
for (const [property, value] of Object.entries(spec)) {
key.set(property, value);
}
}
}
return key;
}
/**
* Receives an index description and returns a modified index description which has had invalid options removed
* from the description and has mapped the `version` option to the `v` option.
*/
function resolveIndexDescription(
description: IndexDescription
): Omit<ResolvedIndexDescription, 'key'> {
const validProvidedOptions = Object.entries(description).filter(([optionName]) =>
VALID_INDEX_OPTIONS.has(optionName)
);
return Object.fromEntries(
// we support the `version` option, but the `createIndexes` command expects it to be named `v`
validProvidedOptions.map(([name, value]) => (name === 'version' ? ['v', value] : [name, value]))
);
}
/**
* @public
* The index information returned by the listIndexes command. https://www.mongodb.com/docs/manual/reference/command/listIndexes/#mongodb-dbcommand-dbcmd.listIndexes
*/
export type IndexDescriptionInfo = Omit<IndexDescription, 'key' | 'version'> & {
key: { [key: string]: IndexDirection };
v?: IndexDescription['version'];
} & Document;
/** @public */
export type IndexDescriptionCompact = Record<string, [name: string, direction: IndexDirection][]>;
/**
* @internal
*
* Internally, the driver represents index description keys with `Map`s to preserve key ordering.
* We don't require users to specify maps, so we transform user provided descriptions into
* "resolved" by converting the `key` into a JS `Map`, if it isn't already a map.
*
* Additionally, we support the `version` option, but the `createIndexes` command uses the field `v`
* to specify the index version so we map the value of `version` to `v`, if provided.
*/
type ResolvedIndexDescription = Omit<IndexDescription, 'key' | 'version'> & {
key: Map<string, IndexDirection>;
v?: IndexDescription['version'];
};
/** @internal */
export class CreateIndexesOperation extends CommandOperation<string[]> {
override options: CreateIndexesOptions;
collectionName: string;
indexes: ReadonlyArray<ResolvedIndexDescription>;
private constructor(
parent: OperationParent,
collectionName: string,
indexes: IndexDescription[],
options?: CreateIndexesOptions
) {
super(parent, options);
this.options = options ?? {};
this.collectionName = collectionName;
this.indexes = indexes.map((userIndex: IndexDescription): ResolvedIndexDescription => {
// Ensure the key is a Map to preserve index key ordering
const key =
userIndex.key instanceof Map ? userIndex.key : new Map(Object.entries(userIndex.key));
const name = userIndex.name ?? Array.from(key).flat().join('_');
const validIndexOptions = resolveIndexDescription(userIndex);
return {
...validIndexOptions,
name,
key
};
});
}
static fromIndexDescriptionArray(
parent: OperationParent,
collectionName: string,
indexes: IndexDescription[],
options?: CreateIndexesOptions
): CreateIndexesOperation {
return new CreateIndexesOperation(parent, collectionName, indexes, options);
}
static fromIndexSpecification(
parent: OperationParent,
collectionName: string,
indexSpec: IndexSpecification,
options: CreateIndexesOptions = {}
): CreateIndexesOperation {
const key = constructIndexDescriptionMap(indexSpec);
const description: IndexDescription = { ...options, key };
return new CreateIndexesOperation(parent, collectionName, [description], options);
}
override get commandName() {
return 'createIndexes';
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<string[]> {
const options = this.options;
const indexes = this.indexes;
const serverWireVersion = maxWireVersion(server);
const cmd: Document = { createIndexes: this.collectionName, indexes };
if (options.commitQuorum != null) {
if (serverWireVersion < 9) {
throw new MongoCompatibilityError(
'Option `commitQuorum` for `createIndexes` not supported on servers < 4.4'
);
}
cmd.commitQuorum = options.commitQuorum;
}
// collation is set on each index, it should not be defined at the root
this.options.collation = undefined;
await super.executeCommand(server, session, cmd, timeoutContext);
const indexNames = indexes.map(index => index.name || '');
return indexNames;
}
}
/** @public */
export type DropIndexesOptions = CommandOperationOptions;
/** @internal */
export class DropIndexOperation extends CommandOperation<Document> {
override options: DropIndexesOptions;
collection: Collection;
indexName: string;
constructor(collection: Collection, indexName: string, options?: DropIndexesOptions) {
super(collection, options);
this.options = options ?? {};
this.collection = collection;
this.indexName = indexName;
}
override get commandName() {
return 'dropIndexes' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<Document> {
const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName };
return await super.executeCommand(server, session, cmd, timeoutContext);
}
}
/** @public */
export type ListIndexesOptions = AbstractCursorOptions & {
/** @internal */
omitMaxTimeMS?: boolean;
};
/** @internal */
export class ListIndexesOperation extends CommandOperation<CursorResponse> {
/**
* @remarks WriteConcern can still be present on the options because
* we inherit options from the client/db/collection. The
* key must be present on the options in order to delete it.
* This allows typescript to delete the key but will
* not allow a writeConcern to be assigned as a property on options.
*/
override options: ListIndexesOptions & { writeConcern?: never };
collectionNamespace: MongoDBNamespace;
constructor(collection: Collection, options?: ListIndexesOptions) {
super(collection, options);
this.options = { ...options };
delete this.options.writeConcern;
this.collectionNamespace = collection.s.namespace;
}
override get commandName() {
return 'listIndexes' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<CursorResponse> {
const serverWireVersion = maxWireVersion(server);
const cursor = this.options.batchSize ? { batchSize: this.options.batchSize } : {};
const command: Document = { listIndexes: this.collectionNamespace.collection, cursor };
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (serverWireVersion >= 9 && this.options.comment !== undefined) {
command.comment = this.options.comment;
}
return await super.executeCommand(server, session, command, timeoutContext, CursorResponse);
}
}
defineAspects(ListIndexesOperation, [
Aspect.READ_OPERATION,
Aspect.RETRYABLE,
Aspect.CURSOR_CREATING
]);
defineAspects(CreateIndexesOperation, [Aspect.WRITE_OPERATION]);
defineAspects(DropIndexOperation, [Aspect.WRITE_OPERATION]);
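// --- Illustrative sketch, not part of the committed file ---
// Collection.listIndexes() drives ListIndexesOperation: `batchSize` is forwarded into
// the command's `cursor` sub-document, and `comment` is only attached on wire
// version >= 9 (MongoDB 4.4+), mirroring the checks in execute() above. The option
// values below are assumptions for the example.
async function listIndexNamesExample(collection: Collection): Promise<string[]> {
  const names: string[] = [];
  for await (const index of collection.listIndexes({ batchSize: 10, comment: 'index audit' })) {
    names.push(index.name);
  }
  return names;
}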

View file

@@ -0,0 +1,166 @@
import type { Document } from '../bson';
import type { BulkWriteOptions } from '../bulk/common';
import type { Collection } from '../collection';
import { MongoInvalidArgumentError, MongoServerError } from '../error';
import type { InferIdType } from '../mongo_types';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { type TimeoutContext } from '../timeout';
import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils';
import { WriteConcern } from '../write_concern';
import { BulkWriteOperation } from './bulk_write';
import { CommandOperation, type CommandOperationOptions } from './command';
import { AbstractOperation, Aspect, defineAspects } from './operation';
/** @internal */
export class InsertOperation extends CommandOperation<Document> {
override options: BulkWriteOptions;
documents: Document[];
constructor(ns: MongoDBNamespace, documents: Document[], options: BulkWriteOptions) {
super(undefined, options);
this.options = { ...options, checkKeys: options.checkKeys ?? false };
this.ns = ns;
this.documents = documents;
}
override get commandName() {
return 'insert' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<Document> {
const options = this.options ?? {};
const ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
const command: Document = {
insert: this.ns.collection,
documents: this.documents,
ordered
};
if (typeof options.bypassDocumentValidation === 'boolean') {
command.bypassDocumentValidation = options.bypassDocumentValidation;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (options.comment !== undefined) {
command.comment = options.comment;
}
return await super.executeCommand(server, session, command, timeoutContext);
}
}
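// --- Illustrative sketch, not part of the committed file ---
// The command document InsertOperation.execute() builds for an ordered two-document
// insert with bypassDocumentValidation enabled; `ordered` defaults to true when the
// option is not a boolean. Collection name and documents are assumptions for the example.
const exampleInsertCommand: Document = {
  insert: 'users',
  documents: [
    { _id: 1, name: 'first' },
    { _id: 2, name: 'second' }
  ],
  ordered: true,
  bypassDocumentValidation: true
};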
/** @public */
export interface InsertOneOptions extends CommandOperationOptions {
/** Allow driver to bypass schema validation. */
bypassDocumentValidation?: boolean;
  /** Force the server to assign _id values instead of the driver. */
forceServerObjectId?: boolean;
}
/** @public */
export interface InsertOneResult<TSchema = Document> {
/** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined */
acknowledged: boolean;
/** The identifier that was inserted. If the server generated the identifier, this value will be null as the driver does not have access to that data */
insertedId: InferIdType<TSchema>;
}
export class InsertOneOperation extends InsertOperation {
constructor(collection: Collection, doc: Document, options: InsertOneOptions) {
super(collection.s.namespace, maybeAddIdToDocuments(collection, [doc], options), options);
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<InsertOneResult> {
const res = await super.execute(server, session, timeoutContext);
if (res.code) throw new MongoServerError(res);
if (res.writeErrors) {
// This should be a WriteError but we can't change it now because of error hierarchy
throw new MongoServerError(res.writeErrors[0]);
}
return {
acknowledged: this.writeConcern?.w !== 0,
insertedId: this.documents[0]._id
};
}
}
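// --- Illustrative sketch, not part of the committed file ---
// Collection.insertOne() resolves with an InsertOneResult. With a `{ w: 0 }` write
// concern the write is unacknowledged, so `acknowledged` is false per the
// `writeConcern?.w !== 0` check above, while `insertedId` is the _id the driver
// assigned client-side. Document contents are assumptions for the example.
async function insertOneExample(collection: Collection): Promise<void> {
  const result = await collection.insertOne({ name: 'ada' });
  console.log(result.acknowledged, result.insertedId);

  const fireAndForget = await collection.insertOne(
    { name: 'grace' },
    { writeConcern: { w: 0 } }
  );
  console.log(fireAndForget.acknowledged); // false
}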
/** @public */
export interface InsertManyResult<TSchema = Document> {
/** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined */
acknowledged: boolean;
  /** The number of inserted documents for this operation */
insertedCount: number;
/** Map of the index of the inserted document to the id of the inserted document */
insertedIds: { [key: number]: InferIdType<TSchema> };
}
/** @internal */
export class InsertManyOperation extends AbstractOperation<InsertManyResult> {
override options: BulkWriteOptions;
collection: Collection;
docs: ReadonlyArray<Document>;
constructor(collection: Collection, docs: ReadonlyArray<Document>, options: BulkWriteOptions) {
super(options);
if (!Array.isArray(docs)) {
throw new MongoInvalidArgumentError('Argument "docs" must be an array of documents');
}
this.options = options;
this.collection = collection;
this.docs = docs;
}
override get commandName() {
return 'insert' as const;
}
override async execute(
server: Server,
session: ClientSession | undefined,
timeoutContext: TimeoutContext
): Promise<InsertManyResult> {
const coll = this.collection;
const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference };
const writeConcern = WriteConcern.fromOptions(options);
const bulkWriteOperation = new BulkWriteOperation(
coll,
this.docs.map(document => ({
insertOne: { document }
})),
options
);
try {
const res = await bulkWriteOperation.execute(server, session, timeoutContext);
return {
acknowledged: writeConcern?.w !== 0,
insertedCount: res.insertedCount,
insertedIds: res.insertedIds
};
} catch (err) {
if (err && err.message === 'Operation must be an object with an operation key') {
throw new MongoInvalidArgumentError(
'Collection.insertMany() cannot be called with an array that has null/undefined values'
);
}
throw err;
}
}
}
defineAspects(InsertOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION]);
defineAspects(InsertOneOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION]);
defineAspects(InsertManyOperation, [Aspect.WRITE_OPERATION]);
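// --- Illustrative sketch, not part of the committed file ---
// Collection.insertMany() goes through InsertManyOperation, which maps each document
// to a bulk `insertOne` model exactly as execute() does above, so insertMany and an
// equivalent bulkWrite() produce the same server commands. Document contents and the
// `ordered: false` choice are assumptions for the example.
async function insertManyExample(collection: Collection): Promise<number> {
  const result = await collection.insertMany(
    [{ sku: 'a' }, { sku: 'b' }, { sku: 'c' }],
    { ordered: false }
  );
  return result.insertedCount;
}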

View file

@@ -0,0 +1,35 @@
import type { Collection } from '../collection';
import { MongoAPIError } from '../error';
import type { Server } from '../sdam/server';
import type { ClientSession } from '../sessions';
import { AbstractOperation, type OperationOptions } from './operation';
/** @internal */
export class IsCappedOperation extends AbstractOperation<boolean> {
override options: OperationOptions;
collection: Collection;
constructor(collection: Collection, options: OperationOptions) {
super(options);
this.options = options;
this.collection = collection;
}
override get commandName() {
return 'listCollections' as const;
}
override async execute(server: Server, session: ClientSession | undefined): Promise<boolean> {
const coll = this.collection;
const [collection] = await coll.s.db
.listCollections(
{ name: coll.collectionName },
{ ...this.options, nameOnly: false, readPreference: this.readPreference, session }
)
.toArray();
if (collection == null || collection.options == null) {
throw new MongoAPIError(`collection ${coll.namespace} not found`);
}
return !!collection.options?.capped;
}
}
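// --- Illustrative sketch, not part of the committed file ---
// Collection.isCapped() runs IsCappedOperation: it issues listCollections filtered to
// this collection's name and reads the `capped` flag off the returned options,
// throwing MongoAPIError when the collection does not exist (as implemented above).
async function assertCappedExample(collection: Collection): Promise<void> {
  if (!(await collection.isCapped())) {
    throw new Error(`${collection.namespace} is not a capped collection`);
  }
}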

Some files were not shown because too many files have changed in this diff.