MongoDB Initial Sync Progress Monitoring

Last Updated: Feb 14, 2020

Sometimes our replica set members fall off the oplog and the node needs to be resynced. When this happens, an Initial Sync is required, which does the following:

  1. Clones all databases except the local database. To clone, the mongod scans every collection in each source database and inserts all data into its own copies of these collections.
  2. Applies all changes to the data set. Using the oplog from the source, the mongod updates its data set to reflect the current state of the replica set.

When the initial sync finishes, the member transitions from STARTUP2 to SECONDARY.

Some common questions when performing an initial sync of a Replica Set Member are:

  • How do I know if the sync is progressing?
  • How long will this take to complete?

Determining if the sync is progressing can be done by either checking the size of the dbPath of the syncing node or by running the db.adminCommand({ replSetGetStatus: 1, initialSync: 1 }) command while connected to the SECONDARY via the mongo shell.

Checking the directory size of the SECONDARY that is being initial sync'ed will provide a good approximation as to how much data still remains to be copied. Note that as the WiredTiger storage engine doesn’t “release” space when documents are deleted there is a high probability that the SECONDARY will have a smaller total directory size than the sync source.

The second step (after cloning) where the oplog entries are applied will also affect the overall time required to sync from the sync source.

The replSetGetStatus command will produce a JSON document similar to the following. This document contains extensive details as to how the database/collection cloning is progressing, as well as any errors that have occurred during the process.

{
  "set": "replset",
  "date": ISODate("2019-12-04T05:12:52.835Z"),
  "myState": 5,
  "term": NumberLong(3),
  "syncingTo": "m2.example.net:27017",
  "syncSourceHost": "m2.example.net:27017",
  "syncSourceId": 1,
  "heartbeatIntervalMillis": NumberLong(2000),
  "majorityVoteCount": 2,
  "writeMajorityCount": 2,
  "optimes": {
    "lastCommittedOpTime": {
      "ts": Timestamp(0, 0),
      "t": NumberLong(-1)
    },
    "lastCommittedWallTime": ISODate("1970-01-01T00:00:00Z"),
    "appliedOpTime": {
      "ts": Timestamp(0, 0),
      "t": NumberLong(-1)
    },
    "durableOpTime": {
      "ts": Timestamp(0, 0),
      "t": NumberLong(-1)
    },
    "lastAppliedWallTime": ISODate("1970-01-01T00:00:00Z"),
    "lastDurableWallTime": ISODate("1970-01-01T00:00:00Z")
  },
  "lastStableRecoveryTimestamp": Timestamp(0, 0),
  "lastStableCheckpointTimestamp": Timestamp(0, 0),
  "initialSyncStatus": {
    "failedInitialSyncAttempts": 0,
    "maxFailedInitialSyncAttempts": 10,
    "initialSyncStart": ISODate("2019-12-04T05:12:35.719Z"),
    "initialSyncAttempts": [],
    "fetchedMissingDocs": 0,
    "appliedOps": 0,
    "initialSyncOplogStart": Timestamp(1575436355, 1),
    "databases": {
      "databasesCloned": 2,
      "admin": {
        "collections": 4,
        "clonedCollections": 4,
        "start": ISODate("2019-12-04T05:12:35.947Z"),
        "end": ISODate("2019-12-04T05:12:36.486Z"),
        "elapsedMillis": 539,
        "admin.system.roles": {
          "documentsToCopy": 12,
          "documentsCopied": 12,
          "indexes": 2,
          "fetchedBatches": 1,
          "start": ISODate("2019-12-04T05:12:35.950Z"),
          "end": ISODate("2019-12-04T05:12:36.101Z"),
          "elapsedMillis": 151,
          "receivedBatches": 1
        },
        "admin.system.users": {
          "documentsToCopy": 22,
          "documentsCopied": 22,
          "indexes": 2,
          "fetchedBatches": 1,
          "start": ISODate("2019-12-04T05:12:36.101Z"),
          "end": ISODate("2019-12-04T05:12:36.252Z"),
          "elapsedMillis": 151,
          "receivedBatches": 1
        },
        "admin.system.keys": {
          "documentsToCopy": 2,
          "documentsCopied": 2,
          "indexes": 1,
          "fetchedBatches": 1,
          "start": ISODate("2019-12-04T05:12:36.252Z"),
          "end": ISODate("2019-12-04T05:12:36.372Z"),
          "elapsedMillis": 120,
          "receivedBatches": 1
        },
        "admin.system.version": {
          "documentsToCopy": 2,
          "documentsCopied": 2,
          "indexes": 1,
          "fetchedBatches": 1,
          "start": ISODate("2019-12-04T05:12:36.372Z"),
          "end": ISODate("2019-12-04T05:12:36.486Z"),
          "elapsedMillis": 114,
          "receivedBatches": 1
        }
      },
      "config": {
        "collections": 2,
        "clonedCollections": 2,
        "start": ISODate("2019-12-04T05:12:36.486Z"),
        "end": ISODate("2019-12-04T05:12:36.863Z"),
        "elapsedMillis": 377,
        "config.transactions": {
          "documentsToCopy": 0,
          "documentsCopied": 0,
          "indexes": 1,
          "fetchedBatches": 0,
          "start": ISODate("2019-12-04T05:12:36.487Z"),
          "end": ISODate("2019-12-04T05:12:36.645Z"),
          "elapsedMillis": 158,
          "receivedBatches": 0
        },
        "config.system.sessions": {
          "documentsToCopy": 1,
          "documentsCopied": 1,
          "indexes": 2,
          "fetchedBatches": 1,
          "start": ISODate("2019-12-04T05:12:36.645Z"),
          "end": ISODate("2019-12-04T05:12:36.863Z"),
          "elapsedMillis": 218,
          "receivedBatches": 1
        }
      },
      "test": {
        "collections": 1,
        "clonedCollections": 0,
        "start": ISODate("2019-12-04T05:12:36.863Z"),
        "test.hugeindex": {
          "documentsToCopy": 25000,
          "documentsCopied": 9187,
          "indexes": 2,
          "fetchedBatches": 8,
          "start": ISODate("2019-12-04T05:12:36.865Z"),
          "receivedBatches": 9
        }
      }
    }
  },
  "members": [
    {
      "_id": 0,
      "name": "m1.example.net:27017",
      "ip": "198.51.100.1",
      "health": 1,
      "state": 1,
      "stateStr": "PRIMARY",
      "uptime": 17,
      "optime": {
        "ts": Timestamp(1575436355, 1),
        "t": NumberLong(3)
      },
      "optimeDurable": {
        "ts": Timestamp(1575436355, 1),
        "t": NumberLong(3)
      },
      "optimeDate": ISODate("2019-12-04T05:12:35Z"),
      "optimeDurableDate": ISODate("2019-12-04T05:12:35Z"),
      "lastHeartbeat": ISODate("2019-12-04T05:12:52.216Z"),
      "lastHeartbeatRecv": ISODate("2019-12-04T05:12:51.485Z"),
      "pingMs": NumberLong(0),
      "lastHeartbeatMessage": "",
      "syncingTo": "",
      "syncSourceHost": "",
      "syncSourceId": -1,
      "infoMessage": "",
      "electionTime": Timestamp(1575434944, 1),
      "electionDate": ISODate("2019-12-04T04:49:04Z"),
      "configVersion": 3
    },
    {
      "_id": 1,
      "name": "m2.example.net:27017",
      "ip": "198.51.100.2",
      "health": 1,
      "state": 2,
      "stateStr": "SECONDARY",
      "uptime": 17,
      "optime": {
        "ts": Timestamp(1575436355, 1),
        "t": NumberLong(3)
      },
      "optimeDurable": {
        "ts": Timestamp(1575436355, 1),
        "t": NumberLong(3)
      },
      "optimeDate": ISODate("2019-12-04T05:12:35Z"),
      "optimeDurableDate": ISODate("2019-12-04T05:12:35Z"),
      "lastHeartbeat": ISODate("2019-12-04T05:12:52.216Z"),
      "lastHeartbeatRecv": ISODate("2019-12-04T05:12:52.728Z"),
      "pingMs": NumberLong(0),
      "lastHeartbeatMessage": "",
      "syncingTo": "",
      "syncSourceHost": "",
      "syncSourceId": -1,
      "infoMessage": "",
      "configVersion": 3
    },
    {
      "_id": 2,
      "name": "m3.example.net:27017",
      "ip": "198.51.100.3",
      "health": 1,
      "state": 5,
      "stateStr": "STARTUP2",
      "uptime": 71,
      "optime": {
        "ts": Timestamp(0, 0),
        "t": NumberLong(-1)
      },
      "optimeDate": ISODate("1970-01-01T00:00:00Z"),
      "syncingTo": "m2.example.net:27017",
      "syncSourceHost": "m2.example.net:27017",
      "syncSourceId": 1,
      "infoMessage": "",
      "configVersion": 3,
      "self": true,
      "lastHeartbeatMessage": ""
    }
  ],
  "ok": 1
}

Depending on the number of databases and collections being sync'ed, the size of this document can be quite large and difficult to visually parse.

To improve this situation I’ve created the following script.

/**
 * initialSyncProgress
 * @author Alex Bevilacqua <alex@alexbevi.com>
 *
 * Can be run against a MongoDB 3.4+ mongod that is in STARTUP2 (initial sync) state to gain some
 * insight into how the sync is progressing. This script WILL NOT tell you how long until the sync
 * is complete, but based on how the script reports progress can be used to estimate this.
 *
 * usage:
 *   mongo --quiet --eval "load('initialSyncProgress.js'); initialSyncProgress();"
 */
// Format a progress fraction as "position/length type (pct%)".
// Fix: the original called Math.round((position / length) * 100, 2) —
// Math.round() accepts only one argument, so the `2` was silently
// ignored and the percentage was rounded to a whole number. Rounding
// to two decimal places matches the evident intent.
var printPercentage = function (position, length, type) {
  var p = Math.round((position / length) * 100 * 100) / 100;
  return position + "/" + length + " " + type + " (" + p + "%)";
}

// Convert a duration in milliseconds to an "HH:MM:SS.mmm" string.
// Fixes vs the original:
//  - hours are no longer truncated modulo 24, so an initial sync that
//    runs longer than a day reports its true elapsed time;
//  - the fractional component is the full 0-999 millisecond value,
//    zero-padded (the original printed only tenths of a second via
//    parseInt((duration % 1000) / 100) while calling them milliseconds).
var msToTime = function (duration) {
  var milliseconds = Math.floor(duration % 1000),
      seconds = Math.floor((duration / 1000) % 60),
      minutes = Math.floor((duration / (1000 * 60)) % 60),
      hours = Math.floor(duration / (1000 * 60 * 60));

  hours = (hours < 10) ? "0" + hours : hours;
  minutes = (minutes < 10) ? "0" + minutes : minutes;
  seconds = (seconds < 10) ? "0" + seconds : seconds;
  milliseconds = (milliseconds < 100)
    ? ((milliseconds < 10) ? "00" + milliseconds : "0" + milliseconds)
    : milliseconds;

  return hours + ":" + minutes + ":" + seconds + "." + milliseconds;
}

// Summarize the (potentially very large) replSetGetStatus/initialSyncStatus
// document into a few human-readable progress lines. Intended to be run from
// the mongo shell while connected to the syncing (STARTUP2) member.
// Fix: the original source contained a raw line break inside the
// "Copying databases for ..." string literal, which is a syntax error in
// the mongo shell; the string is rejoined onto one line here.
var initialSyncProgress = function () {
  var status = db.adminCommand({ replSetGetStatus: 1, initialSync: 1 });
  var dbs_cloned = status.initialSyncStatus.databases.databasesCloned;
  // Remove the counter so the remaining keys of `databases` are all
  // database names and can be iterated uniformly.
  delete status.initialSyncStatus.databases.databasesCloned;
  var dbs = Object.keys(status.initialSyncStatus.databases);
  var dbs_total = dbs.length;

  // Total time elapsed cloning databases (only counts databases that
  // have finished, since `elapsedMillis` is absent until then).
  var elapsedMillis = 0;

  // Status message based on the position within the currently cloning
  // database (collections cloned of collections total).
  var currentlyCloningStatus = "";

  for (var i = 0; i < dbs_total; i++) {
    var d = status.initialSyncStatus.databases[dbs[i]];
    // If the counts aren't the same, either this database is in
    // progress or it hasn't started cloning yet.
    if (d.clonedCollections < d.collections) {
      currentlyCloningStatus = "Cloning database " + dbs[i];
      currentlyCloningStatus += " - cloned " + printPercentage(d.clonedCollections, d.collections, "collections");
      // The per-collection entries live alongside scalar fields in the
      // same object; `documentsToCopy` identifies collection entries.
      var collectionKeys = Object.keys(d);
      for (var j = 0; j < collectionKeys.length; j++) {
        var c = d[collectionKeys[j]];
        if (c.hasOwnProperty("documentsToCopy") && (c.documentsCopied < c.documentsToCopy)) {
          currentlyCloningStatus += "\nCloning collection " + collectionKeys[j] + " " + printPercentage(c.documentsCopied, c.documentsToCopy, "documents");
        }
      }
    }
    // Only add time if there's time to record.
    if (d.hasOwnProperty("elapsedMillis")) {
      elapsedMillis += d.elapsedMillis;
    }
  }

  print("===================")
  print("Initial Sync Status")
  print("===================")
  var now = new Date();
  var started = status.initialSyncStatus.initialSyncStart;
  print("Cloning started at " + started + " (" + msToTime(now - started) + " ago)");

  var members = status.members;
  for (var i = 0; i < members.length; i++) {
    if (members[i].stateStr == "PRIMARY") {
      var optime = members[i].optimeDate
      // Timestamp.getTime() yields seconds since the epoch; multiply by
      // 1000 to compare against a millisecond-based Date.
      var me = new Date(status.initialSyncStatus.initialSyncOplogStart.getTime() * 1000);
      print("Currently " + msToTime(optime - me) + " behind the PRIMARY (based on optimes)");
    }
  }

  if (status.initialSyncStatus.hasOwnProperty("initialSyncAttempts") && status.initialSyncStatus.initialSyncAttempts.length > 0) {
    var failures = status.initialSyncStatus.initialSyncAttempts.length;
    print("Cloning has already failed " + failures + " time(s) …");
    print("Last Failure: " + status.initialSyncStatus.initialSyncAttempts[failures - 1].status);
  }

  print("Copying databases for " + msToTime(elapsedMillis) + ". Note this updates AFTER a collection has been cloned.");
  print("Cloned " + printPercentage(dbs_cloned, dbs_total, "databases"));
  print(currentlyCloningStatus);
}

By running this against the SECONDARY from the mongo shell, a more concise representation of the initialSyncStatus document is produced:

The script will also let you know if there have been any sync failures recorded, as well as what the last failure was.

Hopefully you’ll find this useful when the time comes to resync one of your nodes.

Comments