added split (sync and async) db connections pool

This commit is contained in:
Gani Georgiev 2022-12-15 16:42:35 +02:00
parent e964b019c2
commit b9e257d2b1
13 changed files with 304 additions and 127 deletions

View File

@ -4,11 +4,17 @@
- Added support for SMTP `LOGIN` auth for Microsoft/Outlook and other providers that don't support the `PLAIN` auth method ([#1217](https://github.com/pocketbase/pocketbase/discussions/1217#discussioncomment-4387970)).
- Reduced memory consumption (you can expect ~20% less allocated memory).
- Added support for a split (async and sync) DB connections pool, further increasing the concurrent throughput.
- Improved record references delete performance.
- Removed the unnecessary parentheses in the generated filter SQL query, reducing the "_parse stack overflow_" errors.
- Fixed `~` expressions backslash literal escaping ([#1231](https://github.com/pocketbase/pocketbase/discussions/1231)).
- Changed `core.NewBaseApp(dir, encryptionEnv, isDebug)` to `NewBaseApp(config *BaseAppConfig)`, which allows further configuration of the app instance (see the sketch at the end of these notes).
- Removed `rest.UploadedFile` struct (see `filesystem.File` below).
@ -27,9 +33,7 @@
forms.RecordUpsert.RemoveFiles(key, filenames...) // marks the filenames for deletion
```
- Trigger the `password` validators if any of the other password change fields is set.
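A minimal migration sketch for the `NewBaseApp` signature change noted above (the data dir value and the `PB_ENCRYPTION_KEY` name are placeholders, not values from this commit; zero-valued pool fields keep the documented defaults):

```go
package main

import "github.com/pocketbase/pocketbase/core"

func main() {
    // before: app := core.NewBaseApp("./pb_data", "PB_ENCRYPTION_KEY", false)
    app := core.NewBaseApp(&core.BaseAppConfig{
        DataDir:          "./pb_data",         // placeholder data dir
        EncryptionEnv:    "PB_ENCRYPTION_KEY", // placeholder env variable name
        IsDebug:          false,
        DataMaxOpenConns: 300, // 0 keeps the default (600)
    })

    if err := app.Bootstrap(); err != nil {
        panic(err)
    }
    defer app.ResetBootstrapState()
}
```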
## v0.9.2

View File

@ -16,6 +16,11 @@ import (
// App defines the main PocketBase app interface.
type App interface {
// Deprecated:
// This method may get removed in the near future.
// It is recommended to access the db instance from app.Dao().DB() or
// if you want more flexibility - app.Dao().AsyncDB() and app.Dao().SyncDB().
//
// DB returns the default app database instance.
DB() *dbx.DB
@ -26,6 +31,11 @@ type App interface {
// trying to access the request logs table will result in error.
Dao() *daos.Dao
// Deprecated:
// This method may get removed in the near future.
// It is recommended to access the logs db instance from app.LogsDao().DB() or
// if you want more flexibility - app.LogsDao().AsyncDB() and app.LogsDao().SyncDB().
//
// LogsDB returns the app logs database instance.
LogsDB() *dbx.DB
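A hedged sketch of how call sites of the deprecated getters could migrate, using only the methods shown in this interface (`Dao()`, `LogsDao()` and the new `DB()`/`AsyncDB()`/`SyncDB()` builder getters); the `builders` and `logsBuilder` helpers are illustrative and not part of the commit:

```go
package example

import (
    "github.com/pocketbase/dbx"
    "github.com/pocketbase/pocketbase/core"
)

// builders collects the data db builders of an app instance.
func builders(app core.App) (def, async, sync dbx.Builder) {
    def = app.Dao().DB()        // default builder (currently the async one)
    async = app.Dao().AsyncDB() // concurrent pool
    sync = app.Dao().SyncDB()   // single-connection pool used for writes
    return def, async, sync
}

// logsBuilder replaces direct app.LogsDB() access.
func logsBuilder(app core.App) dbx.Builder {
    return app.LogsDao().DB()
}
```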

View File

@ -29,13 +29,15 @@ type BaseApp struct {
isDebug bool
dataDir string
encryptionEnv string
dataMaxOpenConns int
dataMaxIdleConns int
logsMaxOpenConns int
logsMaxIdleConns int
// internals
cache *store.Store[any]
settings *settings.Settings
dao *daos.Dao
logsDao *daos.Dao
subscriptionsBroker *subscriptions.Broker
@ -132,15 +134,30 @@ type BaseApp struct {
onCollectionsAfterImportRequest *hook.Hook[*CollectionsImportEvent]
}
// BaseAppConfig defines the configuration options for creating a BaseApp instance.
type BaseAppConfig struct {
DataDir string
EncryptionEnv string
IsDebug bool
DataMaxOpenConns int // defaults to 600
DataMaxIdleConns int // defaults to 20
LogsMaxOpenConns int // defaults to 500
LogsMaxIdleConns int // defaults to 10
}
// NewBaseApp creates and returns a new BaseApp instance
// configured with the provided config.
//
// To initialize the app, you need to call `app.Bootstrap()`.
func NewBaseApp(config *BaseAppConfig) *BaseApp {
app := &BaseApp{
dataDir: config.DataDir,
isDebug: config.IsDebug,
encryptionEnv: config.EncryptionEnv,
dataMaxOpenConns: config.DataMaxOpenConns,
dataMaxIdleConns: config.DataMaxIdleConns,
logsMaxOpenConns: config.LogsMaxOpenConns,
logsMaxIdleConns: config.LogsMaxIdleConns,
cache: store.New[any](nil),
settings: settings.New(),
subscriptionsBroker: subscriptions.NewBroker(),
@ -283,14 +300,20 @@ func (app *BaseApp) Bootstrap() error {
// ResetBootstrapState takes care of releasing initialized app resources
// (e.g. closing db connections).
func (app *BaseApp) ResetBootstrapState() error {
if app.Dao() != nil {
if err := app.Dao().AsyncDB().(*dbx.DB).Close(); err != nil {
return err
}
if err := app.Dao().SyncDB().(*dbx.DB).Close(); err != nil {
return err
}
}
if app.LogsDao() != nil {
if err := app.LogsDao().AsyncDB().(*dbx.DB).Close(); err != nil {
return err
}
if err := app.LogsDao().SyncDB().(*dbx.DB).Close(); err != nil {
return err
}
}
@ -302,9 +325,23 @@ func (app *BaseApp) ResetBootstrapState() error {
return nil
}
// Deprecated:
// This method may get removed in the near future.
// It is recommended to access the db instance from app.Dao().DB() or
// if you want more flexibility - app.Dao().AsyncDB() and app.Dao().SyncDB().
//
// DB returns the default app database instance.
func (app *BaseApp) DB() *dbx.DB {
if app.Dao() == nil {
return nil
}
db, ok := app.Dao().DB().(*dbx.DB)
if !ok {
return nil
}
return db
}
// Dao returns the default app Dao instance.
@ -312,9 +349,23 @@ func (app *BaseApp) Dao() *daos.Dao {
return app.dao
}
// Deprecated:
// This method may get removed in the near future.
// It is recommended to access the logs db instance from app.LogsDao().DB() or
// if you want more flexibility - app.LogsDao().AsyncDB() and app.LogsDao().SyncDB().
//
// LogsDB returns the app logs database instance.
func (app *BaseApp) LogsDB() *dbx.DB {
if app.LogsDao() == nil {
return nil
}
db, ok := app.LogsDao().DB().(*dbx.DB)
if !ok {
return nil
}
return db
}
// LogsDao returns the app logs Dao instance.
@ -751,41 +802,81 @@ func (app *BaseApp) OnCollectionsAfterImportRequest() *hook.Hook[*CollectionsImp
// -------------------------------------------------------------------
func (app *BaseApp) initLogsDB() error {
maxOpenConns := 500
maxIdleConns := 10
if app.logsMaxOpenConns > 0 {
maxOpenConns = app.logsMaxOpenConns
}
if app.logsMaxIdleConns > 0 {
maxIdleConns = app.logsMaxIdleConns
}
asyncDB, err := connectDB(filepath.Join(app.DataDir(), "logs.db"))
if err != nil {
return err
}
asyncDB.DB().SetMaxOpenConns(maxOpenConns)
asyncDB.DB().SetMaxIdleConns(maxIdleConns)
asyncDB.DB().SetConnMaxIdleTime(5 * time.Minute)
syncDB, err := connectDB(filepath.Join(app.DataDir(), "logs.db"))
if err != nil {
return err
}
syncDB.DB().SetMaxOpenConns(1)
syncDB.DB().SetMaxIdleConns(1)
syncDB.DB().SetConnMaxIdleTime(5 * time.Minute)
app.logsDao = daos.NewMultiDB(asyncDB, syncDB)
return nil
}
func (app *BaseApp) initDataDB() error {
maxOpenConns := 600
maxIdleConns := 20
if app.dataMaxOpenConns > 0 {
maxOpenConns = app.dataMaxOpenConns
}
if app.dataMaxIdleConns > 0 {
maxIdleConns = app.dataMaxIdleConns
}
asyncDB, err := connectDB(filepath.Join(app.DataDir(), "data.db"))
if err != nil {
return err
}
asyncDB.DB().SetMaxOpenConns(maxOpenConns)
asyncDB.DB().SetMaxIdleConns(maxIdleConns)
asyncDB.DB().SetConnMaxIdleTime(5 * time.Minute)
syncDB, err := connectDB(filepath.Join(app.DataDir(), "data.db"))
if err != nil {
return err
}
syncDB.DB().SetMaxOpenConns(1)
syncDB.DB().SetMaxIdleConns(1)
syncDB.DB().SetConnMaxIdleTime(5 * time.Minute)
if app.IsDebug() {
syncDB.QueryLogFunc = func(ctx context.Context, t time.Duration, sql string, rows *sql.Rows, err error) {
color.HiBlack("[%.2fms] %v\n", float64(t.Milliseconds()), sql)
}
asyncDB.QueryLogFunc = syncDB.QueryLogFunc
syncDB.ExecLogFunc = func(ctx context.Context, t time.Duration, sql string, result sql.Result, err error) {
color.HiBlack("[%.2fms] %v\n", float64(t.Milliseconds()), sql)
}
asyncDB.ExecLogFunc = syncDB.ExecLogFunc
}
app.dao = app.createDaoWithHooks(asyncDB, syncDB)
return nil
}
func (app *BaseApp) createDaoWithHooks(asyncDB, syncDB dbx.Builder) *daos.Dao {
dao := daos.NewMultiDB(asyncDB, syncDB)
dao.BeforeCreateFunc = func(eventDao *daos.Dao, m models.Model) error {
return app.OnModelBeforeCreate().Trigger(&ModelEvent{eventDao, m})
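For context, the split-pool idea behind `initDataDB`/`initLogsDB` can be shown in isolation with plain `database/sql` (a simplified sketch that mirrors the defaults above, not the app's actual wiring):

```go
package example

import (
    "database/sql"
    "time"

    _ "modernc.org/sqlite" // same pure-Go driver used by the nocgo build
)

// openSplit opens two pools over the same SQLite file: a larger one for
// concurrent (read) queries and a single-connection one that serializes
// writes, which helps keep SQLITE_BUSY errors to a minimum.
func openSplit(dbPath string) (asyncDB, syncDB *sql.DB, err error) {
    asyncDB, err = sql.Open("sqlite", dbPath)
    if err != nil {
        return nil, nil, err
    }
    asyncDB.SetMaxOpenConns(600)
    asyncDB.SetMaxIdleConns(20)
    asyncDB.SetConnMaxIdleTime(5 * time.Minute)

    syncDB, err = sql.Open("sqlite", dbPath)
    if err != nil {
        asyncDB.Close()
        return nil, nil, err
    }
    syncDB.SetMaxOpenConns(1) // all writes funnel through one connection
    syncDB.SetMaxIdleConns(1)
    syncDB.SetConnMaxIdleTime(5 * time.Minute)

    return asyncDB, syncDB, nil
}
```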

View File

@ -11,7 +11,11 @@ func TestNewBaseApp(t *testing.T) {
const testDataDir = "./pb_base_app_test_data_dir/"
defer os.RemoveAll(testDataDir)
app := NewBaseApp(&BaseAppConfig{
DataDir: testDataDir,
EncryptionEnv: "test_env",
IsDebug: true,
})
if app.dataDir != testDataDir {
t.Fatalf("expected dataDir %q, got %q", testDataDir, app.dataDir)
@ -42,7 +46,11 @@ func TestBaseAppBootstrap(t *testing.T) {
const testDataDir = "./pb_base_app_test_data_dir/"
defer os.RemoveAll(testDataDir)
app := NewBaseApp(&BaseAppConfig{
DataDir: testDataDir,
EncryptionEnv: "pb_test_env",
IsDebug: false,
})
defer app.ResetBootstrapState()
// bootstrap
@ -112,29 +120,33 @@ func TestBaseAppGetters(t *testing.T) {
const testDataDir = "./pb_base_app_test_data_dir/"
defer os.RemoveAll(testDataDir)
app := NewBaseApp(&BaseAppConfig{
DataDir: testDataDir,
EncryptionEnv: "pb_test_env",
IsDebug: false,
})
defer app.ResetBootstrapState()
if err := app.Bootstrap(); err != nil {
t.Fatal(err)
}
if app.dao != app.Dao() {
t.Fatalf("Expected app.Dao %v, got %v", app.Dao(), app.dao)
}
if app.dao.AsyncDB() != app.DB() {
t.Fatalf("Expected app.DB %v, got %v", app.DB(), app.dao.AsyncDB())
}
if app.logsDao != app.LogsDao() {
t.Fatalf("Expected app.LogsDao %v, got %v", app.LogsDao(), app.logsDao)
}
if app.logsDao.AsyncDB() != app.LogsDB() {
t.Fatalf("Expected app.LogsDB %v, got %v", app.LogsDB(), app.logsDao.AsyncDB())
}
if app.dataDir != app.DataDir() {
t.Fatalf("Expected app.DataDir %v, got %v", app.DataDir(), app.dataDir)
}
@ -400,7 +412,11 @@ func TestBaseAppNewMailClient(t *testing.T) {
const testDataDir = "./pb_base_app_test_data_dir/"
defer os.RemoveAll(testDataDir)
app := NewBaseApp(&BaseAppConfig{
DataDir: testDataDir,
EncryptionEnv: "pb_test_env",
IsDebug: false,
})
client1 := app.NewMailClient()
if val, ok := client1.(*mailer.Sendmail); !ok {
@ -419,7 +435,11 @@ func TestBaseAppNewFilesystem(t *testing.T) {
const testDataDir = "./pb_base_app_test_data_dir/"
defer os.RemoveAll(testDataDir)
app := NewBaseApp(&BaseAppConfig{
DataDir: testDataDir,
EncryptionEnv: "pb_test_env",
IsDebug: false,
})
// local
local, localErr := app.NewFilesystem()

core/db.go (new file)
View File

@ -0,0 +1,20 @@
package core
import (
"github.com/pocketbase/dbx"
)
func initPragmas(db *dbx.DB) error {
// note: the busy_timeout pragma must be first because
// the connection needs to be set to block on busy before WAL mode
// is set in case it hasn't been already set by another connection
_, err := db.NewQuery(`
PRAGMA busy_timeout = 10000;
PRAGMA journal_mode = WAL;
PRAGMA journal_size_limit = 100000000;
PRAGMA synchronous = NORMAL;
PRAGMA foreign_keys = TRUE;
`).Execute()
return err
}
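As a quick sanity check (an assumed helper, not part of this commit), the effect of `initPragmas` can be verified by reading a pragma back through the same connection:

```go
package example

import "github.com/pocketbase/dbx"

// journalMode reads the journal mode back; after initPragmas it is
// expected to report "wal".
func journalMode(db *dbx.DB) (string, error) {
    var mode string
    err := db.NewQuery("PRAGMA journal_mode;").Row(&mode)
    return mode, err
}
```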

View File

@ -3,35 +3,20 @@
package core
import (
"github.com/pocketbase/dbx"
_ "github.com/mattn/go-sqlite3"
)
func connectDB(dbPath string) (*dbx.DB, error) {
db, err := dbx.Open("sqlite3", dbPath)
if err != nil {
return nil, err
}
if err := initPragmas(db); err != nil {
db.Close()
return nil, err
}
return db, nil
}

View File

@ -3,30 +3,20 @@
package core
import (
"github.com/pocketbase/dbx"
_ "modernc.org/sqlite"
)
func connectDB(dbPath string) (*dbx.DB, error) {
db, err := dbx.Open("sqlite", dbPath)
if err != nil {
return nil, err
}
if err := initPragmas(db); err != nil {
db.Close()
return nil, err
}
return db, nil
}

View File

@ -17,17 +17,29 @@ import (
const DefaultMaxFailRetries = 5
// New creates a new Dao instance with the provided db builder
// (for both async and sync db operations).
func New(db dbx.Builder) *Dao {
return NewMultiDB(db, db)
}
// NewMultiDB creates a new Dao instance with the provided dedicated
// async and sync db builders.
func NewMultiDB(asyncDB, syncDB dbx.Builder) *Dao {
return &Dao{
asyncDB: asyncDB,
syncDB: syncDB,
}
}
// Dao handles various db operations.
// Think of Dao as a repository and service layer in one.
type Dao struct {
// in a transaction both refer to the same *dbx.TX instance
asyncDB dbx.Builder
syncDB dbx.Builder
// @todo delete after removing Block and Continue
sem *semaphore.Weighted
mux sync.RWMutex
@ -39,11 +51,29 @@ type Dao struct {
AfterDeleteFunc func(eventDao *Dao, m models.Model)
}
// DB returns the default dao db builder (*dbx.DB or *dbx.TX).
//
// Currently the default db builder is dao.asyncDB but that may change in the future.
func (dao *Dao) DB() dbx.Builder {
return dao.AsyncDB()
}
// AsyncDB returns the dao asynchronous db builder (*dbx.DB or *dbx.TX).
//
// In a transaction the asyncDB and syncDB refer to the same *dbx.TX instance.
func (dao *Dao) AsyncDB() dbx.Builder {
return dao.asyncDB
}
// SyncDB returns the dao synchronous db builder (*dbx.DB or *dbx.TX).
//
// In a transaction the asyncDB and syncDB refer to the same *dbx.TX instance.
func (dao *Dao) SyncDB() dbx.Builder {
return dao.syncDB
}
// Deprecated: Will be removed in the next releases. Use [Dao.SyncDB()] instead.
//
// Block acquires a lock and blocks all other goroutines that use
// the Dao instance until dao.Continue() is called, effectively making
// the concurrent requests perform synchronous db operations.
@ -75,6 +105,8 @@ func (dao *Dao) Block(ctx context.Context) error {
return dao.sem.Acquire(ctx, 1)
}
// Deprecated: Will be removed in the next releases. Use [Dao.SyncDB()] instead.
//
// Continue releases the previously acquired Block() lock.
func (dao *Dao) Continue() {
if dao.sem == nil {
@ -88,7 +120,7 @@ func (dao *Dao) Continue() {
// based on the provided model argument.
func (dao *Dao) ModelQuery(m models.Model) *dbx.SelectQuery {
tableName := m.TableName()
return dao.DB().Select("{{" + tableName + "}}.*").From(tableName)
}
// FindById finds a single db record with the specified id and
@ -105,9 +137,9 @@ type afterCallGroup struct {
// RunInTransaction wraps fn into a transaction.
//
// It is safe to nest RunInTransaction calls as long as you use the txDao.
func (dao *Dao) RunInTransaction(fn func(txDao *Dao) error) error {
switch txOrDB := dao.SyncDB().(type) {
case *dbx.Tx:
// nested transactions are not supported by default
// so execute the function within the current transaction
@ -165,14 +197,15 @@ func (dao *Dao) RunInTransaction(fn func(txDao *Dao) error) error {
if txError == nil {
// execute after event calls on successful transaction
// (note: using the non-transaction dao to allow following queries in the after hooks)
for _, call := range afterCalls {
switch call.Action {
case "create":
dao.AfterCreateFunc(dao, call.Model)
case "update":
dao.AfterUpdateFunc(dao, call.Model)
case "delete":
dao.AfterDeleteFunc(dao, call.Model)
}
}
}
@ -196,7 +229,7 @@ func (dao *Dao) Delete(m models.Model) error {
}
}
if err := retryDao.SyncDB().Model(m).Delete(); err != nil {
return err
}
@ -241,7 +274,7 @@ func (dao *Dao) update(m models.Model) error {
if v, ok := any(m).(models.ColumnValueMapper); ok {
dataMap := v.ColumnValueMap()
_, err := dao.SyncDB().Update(
m.TableName(),
dataMap,
dbx.HashExp{"id": m.GetId()},
@ -251,7 +284,7 @@ func (dao *Dao) update(m models.Model) error {
return err
}
} else {
if err := dao.SyncDB().Model(m).Update(); err != nil {
return err
}
}
@ -292,12 +325,12 @@ func (dao *Dao) create(m models.Model) error {
dataMap["id"] = m.GetId() dataMap["id"] = m.GetId()
} }
_, err := dao.db.Insert(m.TableName(), dataMap).Execute() _, err := dao.SyncDB().Insert(m.TableName(), dataMap).Execute()
if err != nil { if err != nil {
return err return err
} }
} else { } else {
if err := dao.db.Model(m).Insert(); err != nil { if err := dao.SyncDB().Model(m).Insert(); err != nil {
return err return err
} }
} }
@ -320,7 +353,7 @@ Retry:
if attempts == 2 {
// assign new Dao without the before hooks to avoid triggering
// the already fired before callbacks multiple times
retryDao = NewMultiDB(dao.asyncDB, dao.syncDB)
retryDao.AfterCreateFunc = dao.AfterCreateFunc
retryDao.AfterUpdateFunc = dao.AfterUpdateFunc
retryDao.AfterDeleteFunc = dao.AfterDeleteFunc
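A hedged usage sketch of the reworked Dao (the `deleteBoth` helper and its model arguments are illustrative, not part of the commit): writes such as `Delete` now go through the synchronous builder internally, and nesting `RunInTransaction` is safe as long as the provided `txDao` is used:

```go
package example

import (
    "github.com/pocketbase/pocketbase/daos"
    "github.com/pocketbase/pocketbase/models"
)

// deleteBoth deletes two models atomically.
func deleteBoth(dao *daos.Dao, a, b models.Model) error {
    return dao.RunInTransaction(func(txDao *daos.Dao) error {
        if err := txDao.Delete(a); err != nil {
            return err
        }
        // nested call - reuses the same underlying *dbx.Tx via txDao
        return txDao.RunInTransaction(func(nestedDao *daos.Dao) error {
            return nestedDao.Delete(b)
        })
    })
}
```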

View File

@ -20,6 +20,25 @@ func TestNew(t *testing.T) {
}
}
func TestNewMultiDB(t *testing.T) {
testApp, _ := tests.NewTestApp()
defer testApp.Cleanup()
dao := daos.NewMultiDB(testApp.Dao().AsyncDB(), testApp.Dao().SyncDB())
if dao.DB() != testApp.Dao().AsyncDB() {
t.Fatal("[db-asyncdb] The 2 db instances are different")
}
if dao.AsyncDB() != testApp.Dao().AsyncDB() {
t.Fatal("[asyncdb-asyncdb] The 2 db instances are different")
}
if dao.SyncDB() != testApp.Dao().SyncDB() {
t.Fatal("[syncdb-syncdb] The 2 db instances are different")
}
}
func TestDaoModelQuery(t *testing.T) {
testApp, _ := tests.NewTestApp()
defer testApp.Cleanup()

View File

@ -1,12 +1,10 @@
package daos
import (
"errors"
"fmt"
"math"
"strings"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/models"
@ -359,25 +357,12 @@ func (dao *Dao) DeleteRecord(record *models.Record) error {
return err
}
return dao.RunInTransaction(func(txDao *Dao) error {
// manually trigger delete on any linked external auth to ensure
// that the `OnModel*` hooks are triggered
//
// note: the select is outside of the transaction to minimize
// SQLITE_BUSY errors when mixing read&write in a single transaction
if record.Collection().IsAuth() {
externalAuths, err := dao.FindAllExternalAuthsByRecord(record)
if err != nil {
return err

View File

@ -634,10 +634,16 @@ func TestDeleteRecord(t *testing.T) {
// delete existing record + cascade
// ---
calledQueries := []string{}
app.Dao().SyncDB().(*dbx.DB).QueryLogFunc = func(ctx context.Context, t time.Duration, sql string, rows *sql.Rows, err error) {
calledQueries = append(calledQueries, sql)
}
app.Dao().AsyncDB().(*dbx.DB).QueryLogFunc = func(ctx context.Context, t time.Duration, sql string, rows *sql.Rows, err error) {
calledQueries = append(calledQueries, sql)
}
app.Dao().SyncDB().(*dbx.DB).ExecLogFunc = func(ctx context.Context, t time.Duration, sql string, result sql.Result, err error) {
calledQueries = append(calledQueries, sql)
}
app.Dao().AsyncDB().(*dbx.DB).ExecLogFunc = func(ctx context.Context, t time.Duration, sql string, result sql.Result, err error) {
calledQueries = append(calledQueries, sql)
}
rec3, _ := app.Dao().FindRecordById("users", "oap640cot4yru2s")

View File

@ -49,6 +49,12 @@ type Config struct {
// hide the default console server info on app startup
HideStartBanner bool
// optional DB configurations
DataMaxOpenConns int // defaults to 600
DataMaxIdleConns int // defaults to 20
LogsMaxOpenConns int // defaults to 500
LogsMaxIdleConns int // defaults to 10
}
// New creates a new PocketBase instance with the default configuration.
@ -105,11 +111,15 @@ func NewWithConfig(config Config) *PocketBase {
pb.eagerParseFlags(config)
// initialize the app instance
pb.appWrapper = &appWrapper{core.NewBaseApp(&core.BaseAppConfig{
DataDir: pb.dataDirFlag,
EncryptionEnv: pb.encryptionEnvFlag,
IsDebug: pb.debugFlag,
DataMaxOpenConns: config.DataMaxOpenConns,
DataMaxIdleConns: config.DataMaxIdleConns,
LogsMaxOpenConns: config.LogsMaxOpenConns,
LogsMaxIdleConns: config.LogsMaxIdleConns,
})}
// hide the default help command (allow only `--help` flag)
pb.RootCmd.SetHelpCommand(&cobra.Command{Hidden: true})
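A minimal sketch of tuning the new pool options from user code (the values are arbitrary examples; zero values keep the defaults noted in `Config`):

```go
package main

import (
    "log"

    "github.com/pocketbase/pocketbase"
)

func main() {
    app := pocketbase.NewWithConfig(pocketbase.Config{
        DataMaxOpenConns: 300, // example value; 0 keeps the default (600)
        DataMaxIdleConns: 10,
        LogsMaxOpenConns: 100,
        LogsMaxIdleConns: 5,
    })

    if err := app.Start(); err != nil {
        log.Fatal(err)
    }
}
```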

View File

@ -93,7 +93,11 @@ func NewTestApp(optTestDataDir ...string) (*TestApp, error) {
return nil, err
}
app := core.NewBaseApp(&core.BaseAppConfig{
DataDir: tempDir,
EncryptionEnv: "pb_test_env",
IsDebug: false,
})
// load data dir and db connections
if err := app.Bootstrap(); err != nil {