Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pbm/defs/defs.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ const (
)

const (
ConfigDB = "config"
ConfigDatabasesNS = "config.databases"
ConfigCollectionsNS = "config.collections"
ConfigChunksNS = "config.chunks"
Expand Down
119 changes: 92 additions & 27 deletions pbm/restore/logical.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ import (
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/writeconcern"

"github.com/percona/percona-backup-mongodb/pbm/archive"
"github.com/percona/percona-backup-mongodb/pbm/backup"
Expand All @@ -38,6 +37,13 @@ import (
"github.com/percona/percona-backup-mongodb/pbm/version"
)

// mDBCl represents the MongoDB client interface for the DB related ops
// performed during restore. It is private to the restore package and exists
// primarily as a seam for unit testing (see discussion in the PR review).
type mDBCl interface {
// runCmdShardsvrDropDatabase issues the _shardsvrDropDatabase command to drop
// db cluster-wide; configDBDoc supplies the db's config.databases metadata.
runCmdShardsvrDropDatabase(ctx context.Context, db string, configDBDoc *configDatabasesDoc) error
// runCmdShardsvrDropCollection issues the _shardsvrDropCollection command to
// drop db.coll cluster-wide.
runCmdShardsvrDropCollection(ctx context.Context, db, coll string, configDBDoc *configDatabasesDoc) error
// getConfigDatabasesDoc fetches the config.databases document for db;
// callers treat mongo.ErrNoDocuments as "db unknown to the config server".
getConfigDatabasesDoc(ctx context.Context, db string) (*configDatabasesDoc, error)
}

Comment on lines +41 to +46
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this meant to represent a generic interface for mongo commands or just sharding operations specific for restore? If generic, then I am not sure whether this is the right location to define it. If specific, then we should find a better name

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That db layer is just for the restore, it's private, and I was forced to create it to be able to unit test it. So it's near the usage place. If we refactor the whole code in restore we can move it somewhere else.

type Restore struct {
name string
leadConn connect.Client
Expand Down Expand Up @@ -69,6 +75,8 @@ type Restore struct {
opid string

indexCatalog *idx.IndexCatalog

db mDBCl
}

type oplogRange struct {
Expand Down Expand Up @@ -108,6 +116,7 @@ func New(
rsMap: rsMap,

cfg: cfg,
db: newMDB(leadConn, nodeConn),

numParallelColls: numParallelColls,
numInsertionWorkersPerCol: numInsertionWorkersPerCol,
Expand Down Expand Up @@ -253,11 +262,18 @@ func (r *Restore) Snapshot(
return err
}

// drop sharded dbs on sharded cluster, on each shard (not CSRS), only for full restore
if r.nodeInfo.IsSharded() && !r.nodeInfo.IsConfigSrv() && !util.IsSelective(nss) {
err = r.dropShardedDBs(ctx, bcp)
if err != nil {
return err
// drop databases on the sharded cluster as part of cleanup phase, on each shard (not CSRS)
if r.nodeInfo.IsSharded() && !r.nodeInfo.IsConfigSrv() {
if util.IsSelective(nss) {
err = r.selRestoreDBCleanup(ctx, nss)
if err != nil {
return errors.Wrap(err, "selective restore cleanup")
}
} else {
err = r.fullRestoreDBCleanup(ctx, bcp)
if err != nil {
return errors.Wrap(err, "full restore cleanup")
}
}
}
Comment on lines +265 to 278
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd extract this into standalone method e.g. r.shardedDBCleanup or w/e.

IMHO it's complex enough set of conditions so we don't want to maintain it at two different places exactly the same

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree with that, but PBM uses that style from before in the whole code base. So we do checks and branching within the main flow (orchestrator) and not within the function or method. I wouldn't change that style now, but I agree with your comment.


Expand Down Expand Up @@ -441,11 +457,18 @@ func (r *Restore) PITR(
return err
}

// drop sharded dbs on sharded cluster, on each shard (not CSRS), only for full restore
if r.nodeInfo.IsSharded() && !r.nodeInfo.IsConfigSrv() && !util.IsSelective(nss) {
err = r.dropShardedDBs(ctx, bcp)
if err != nil {
return err
// drop databases on the sharded cluster as part of cleanup phase, on each shard (not CSRS)
if r.nodeInfo.IsSharded() && !r.nodeInfo.IsConfigSrv() {
if util.IsSelective(nss) {
err = r.selRestoreDBCleanup(ctx, nss)
if err != nil {
return errors.Wrap(err, "selective restore cleanup")
}
} else {
err = r.fullRestoreDBCleanup(ctx, bcp)
if err != nil {
return errors.Wrap(err, "full restore cleanup")
}
}
}

Expand Down Expand Up @@ -866,23 +889,19 @@ func (r *Restore) toState(ctx context.Context, status defs.Status, wait *time.Du
return toState(ctx, r.leadConn, status, r.name, r.nodeInfo, r.reconcileStatus, wait)
}

// dropShardedDBs drop all sharded databases present in the backup.
// fullRestoreDBCleanup drops all databases present in the backup.
// Backup is specified with bcp parameter.
// For each sharded database present in the backup _shardsvrDropDatabase command
// For each database present in the backup _shardsvrDropDatabase command
// is used to drop the database from the config srv and all shards.
func (r *Restore) dropShardedDBs(ctx context.Context, bcp *backup.BackupMeta) error {
func (r *Restore) fullRestoreDBCleanup(ctx context.Context, bcp *backup.BackupMeta) error {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

previous name clearly communicated that this targets only Sharded environment. Maybe fullShardedDBCleanup (and selShardedDBCleanup)?

dbsInBcp, err := r.getDBsFromBackup(bcp)
if err != nil {
return errors.Wrap(err, "get dbs from backup")
}

// make cluster-wide drop for each db from the backup
for _, db := range dbsInBcp {
var configDBDoc configDatabasesDoc
err := r.leadConn.ConfigDatabase().
Collection("databases").
FindOne(ctx, bson.D{{"_id", db}}).
Decode(&configDBDoc)
configDBDoc, err := r.db.getConfigDatabasesDoc(ctx, db)
if err != nil {
if errors.Is(err, mongo.ErrNoDocuments) {
continue
Expand All @@ -895,21 +914,67 @@ func (r *Restore) dropShardedDBs(ctx context.Context, bcp *backup.BackupMeta) er
continue
}

cmd := bson.D{
{"_shardsvrDropDatabase", 1},
{"databaseVersion", configDBDoc.Version},
{"writeConcern", writeconcern.Majority()},
}
res := r.nodeConn.Database(db).RunCommand(ctx, cmd)
if err := res.Err(); err != nil {
return errors.Wrapf(err, "_shardsvrDropDatabase for %q", db)
err = r.db.runCmdShardsvrDropDatabase(ctx, db, configDBDoc)
if err != nil {
return errors.Wrap(err, "full restore cleanup")
}
r.log.Debug("drop %q", db)
}

return nil
}

// selRestoreDBCleanup performs the cluster-wide cleanup phase of a selective
// restore: for every namespace in nss it drops either the whole database
// (_shardsvrDropDatabase) or the single collection (_shardsvrDropCollection).
// Only the primary shard for a database executes the drop, and system
// databases (admin/config) are never touched.
func (r *Restore) selRestoreDBCleanup(ctx context.Context, nss []string) error {
	// databases already dropped in this pass; later namespaces for the
	// same db are no-ops
	var droppedDBs []string

	for _, ns := range nss {
		dbName, collName := util.ParseNS(ns)

		// system/malformed namespaces are not allowed for sel restore
		if dbName == "" || dbName == defs.DB || dbName == defs.ConfigDB {
			continue
		}
		// skip namespaces whose database was already dropped
		if slices.Contains(droppedDBs, dbName) {
			continue
		}

		configDBDoc, err := r.db.getConfigDatabasesDoc(ctx, dbName)
		if err != nil {
			if errors.Is(err, mongo.ErrNoDocuments) {
				// db unknown to the config server — nothing to clean up
				continue
			}
			return errors.Wrapf(err, "get config.databases doc for %q", dbName)
		}

		// only the db's primary shard performs the drop; other shards ignore it
		if configDBDoc.Primary != r.nodeInfo.SetName {
			continue
		}

		if collName == "" {
			// whole-db namespace: drop the database cluster-wide
			if err := r.db.runCmdShardsvrDropDatabase(ctx, dbName, configDBDoc); err != nil {
				return errors.Wrapf(err, "db cleanup %s", dbName)
			}
			droppedDBs = append(droppedDBs, dbName)
			r.log.Debug("drop db: %q", dbName)
			continue
		}

		// db.coll namespace: drop just that collection cluster-wide
		if err := r.db.runCmdShardsvrDropCollection(ctx, dbName, collName, configDBDoc); err != nil {
			return errors.Wrapf(err, "collection cleanup %s", ns)
		}
		r.log.Debug("drop ns: %q", ns)
	}

	return nil
}

// getDBsFromBackup returns all databases present in backup metadata file
// for each replicaset.
func (r *Restore) getDBsFromBackup(bcp *backup.BackupMeta) ([]string, error) {
Expand Down
36 changes: 0 additions & 36 deletions pbm/restore/logical_cfgsvr_full_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,59 +6,23 @@ import (
"encoding/hex"
"fmt"
"io"
"log"
"os"
"testing"
"time"

"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/mongodb"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"

"github.com/percona/percona-backup-mongodb/pbm/backup"
"github.com/percona/percona-backup-mongodb/pbm/compress"
"github.com/percona/percona-backup-mongodb/pbm/connect"
"github.com/percona/percona-backup-mongodb/pbm/errors"
pbmlog "github.com/percona/percona-backup-mongodb/pbm/log"
"github.com/percona/percona-backup-mongodb/pbm/storage"
"github.com/percona/percona-backup-mongodb/pbm/topo"
"github.com/percona/percona-backup-mongodb/pbm/util"
)

var leadConn connect.Client

const configSystemSessionsUUID = "92e8635902a743a09cf410d4a2a4a576"

// TestMain spins up a disposable MongoDB container (via testcontainers) and
// establishes the package-level leadConn connection before running the tests,
// then tears both down afterwards.
func TestMain(m *testing.M) {
ctx := context.Background()
// start a Percona Server for MongoDB container for the duration of the run
mongodbContainer, err := mongodb.Run(ctx, "perconalab/percona-server-mongodb:8.0.4-multi")
if err != nil {
log.Fatalf("error while creating mongo test container: %v", err)
}
connStr, err := mongodbContainer.ConnectionString(ctx)
if err != nil {
log.Fatalf("conn string error: %v", err)
}

// leadConn is the package-level client shared by the tests
leadConn, err = connect.Connect(ctx, connStr, "restore-test")
if err != nil {
log.Fatalf("mongo client connect error: %v", err)
}

code := m.Run()

// teardown: disconnect first, then terminate the container
err = leadConn.Disconnect(ctx)
if err != nil {
log.Fatalf("mongo client disconnect error: %v", err)
}
if err := testcontainers.TerminateContainer(mongodbContainer); err != nil {
log.Fatalf("failed to terminate container: %s", err)
}

// exit with the test run's status code (deferred calls would be skipped,
// which is why cleanup happens explicitly above)
os.Exit(code)
}

func TestFullRestoreConfigDatabases(t *testing.T) {
ctx := context.Background()

Expand Down
Loading
Loading