(untested!) added temp backup api scaffoldings before introducing autobackups and rotations

Gani Georgiev 2023-05-08 21:52:40 +03:00
parent 60eee96034
commit d3314e1e23
17 changed files with 914 additions and 40 deletions


@ -29,6 +29,14 @@
- Updated the logs "real" user IP to check for the `Fly-Client-IP` header and changed the `X-Forwarded-For` header handling to use the first non-empty leftmost-ish IP as it is the closest to the "real" IP.
- Added new `archive.Create()` and `archive.Extract()` helpers (_currently works only with zip_).
- Added new `Filesystem.List(prefix)` helper to retrieve a flat list with all files under the provided prefix.
- Added new `App.NewBackupsFilesystem()` helper to create a dedicated fs abstraction for managing app backups.
- (@todo docs) Added new `App.OnTerminate()` hook.
## v0.15.3

apis/backup.go (new file, 189 lines)

@ -0,0 +1,189 @@
package apis
import (
"context"
"log"
"net/http"
"path/filepath"
"time"
"github.com/labstack/echo/v5"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/forms"
"github.com/pocketbase/pocketbase/tools/types"
"github.com/spf13/cast"
)
// bindBackupApi registers the backup api endpoints and the corresponding handlers.
//
// @todo add hooks once the app hooks api restructuring is finalized
func bindBackupApi(app core.App, rg *echo.Group) {
api := backupApi{app: app}
subGroup := rg.Group("/backups", ActivityLogger(app))
subGroup.GET("", api.list, RequireAdminAuth())
subGroup.POST("", api.create, RequireAdminAuth())
subGroup.GET("/:name", api.download)
subGroup.DELETE("/:name", api.delete, RequireAdminAuth())
subGroup.POST("/:name/restore", api.restore, RequireAdminAuth())
}
type backupApi struct {
app core.App
}
type backupItem struct {
Name string `json:"name"`
Size int64 `json:"size"`
Modified types.DateTime `json:"modified"`
}
func (api *backupApi) list(c echo.Context) error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
fsys, err := api.app.NewBackupsFilesystem()
if err != nil {
return NewBadRequestError("Failed to load backups filesystem", err)
}
defer fsys.Close()
fsys.SetContext(ctx)
backups, err := fsys.List("")
if err != nil {
return NewBadRequestError("Failed to retrieve backup items. Raw error: \n"+err.Error(), nil)
}
result := make([]backupItem, len(backups))
for i, obj := range backups {
modified, _ := types.ParseDateTime(obj.ModTime)
result[i] = backupItem{
Name: obj.Key,
Size: obj.Size,
Modified: modified,
}
}
return c.JSON(http.StatusOK, result)
}
func (api *backupApi) create(c echo.Context) error {
if cast.ToString(api.app.Cache().Get(core.CacheActiveBackupsKey)) != "" {
return NewBadRequestError("Try again later - another backup/restore process has already been started", nil)
}
form := forms.NewBackupCreate(api.app)
if err := c.Bind(form); err != nil {
return NewBadRequestError("An error occurred while loading the submitted data.", err)
}
return form.Submit(func(next forms.InterceptorNextFunc[string]) forms.InterceptorNextFunc[string] {
return func(name string) error {
if err := next(name); err != nil {
return NewBadRequestError("Failed to create backup", err)
}
return c.NoContent(http.StatusNoContent)
}
})
}
func (api *backupApi) download(c echo.Context) error {
fileToken := c.QueryParam("token")
_, err := api.app.Dao().FindAdminByToken(
fileToken,
api.app.Settings().AdminFileToken.Secret,
)
if err != nil {
return NewForbiddenError("Insufficient permissions to access the resource.", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
fsys, err := api.app.NewBackupsFilesystem()
if err != nil {
return NewBadRequestError("Failed to load backups filesystem", err)
}
defer fsys.Close()
fsys.SetContext(ctx)
name := c.PathParam("name")
br, err := fsys.GetFile(name)
if err != nil {
return NewBadRequestError("Failed to retrieve backup item. Raw error: \n"+err.Error(), nil)
}
defer br.Close()
return fsys.Serve(
c.Response(),
c.Request(),
name,
filepath.Base(name), // without the path prefix (if any)
)
}
func (api *backupApi) restore(c echo.Context) error {
if cast.ToString(api.app.Cache().Get(core.CacheActiveBackupsKey)) != "" {
return NewBadRequestError("Try again later - another backup/restore process has already been started", nil)
}
name := c.PathParam("name")
existsCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
fsys, err := api.app.NewBackupsFilesystem()
if err != nil {
return NewBadRequestError("Failed to load backups filesystem", err)
}
defer fsys.Close()
fsys.SetContext(existsCtx)
if exists, err := fsys.Exists(name); !exists {
return NewNotFoundError("Missing or invalid backup file", err)
}
go func() {
// wait max 10 minutes
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
// give some optimistic time to write the response
time.Sleep(1 * time.Second)
if err := api.app.RestoreBackup(ctx, name); err != nil && api.app.IsDebug() {
log.Println(err)
}
}()
return c.NoContent(http.StatusNoContent)
}
func (api *backupApi) delete(c echo.Context) error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
fsys, err := api.app.NewBackupsFilesystem()
if err != nil {
return NewBadRequestError("Failed to load backups filesystem", err)
}
defer fsys.Close()
fsys.SetContext(ctx)
name := c.PathParam("name")
if err := fsys.Delete(name); err != nil {
return NewBadRequestError("Invalid or already deleted backup file. Raw error: \n"+err.Error(), nil)
}
return c.NoContent(http.StatusNoContent)
}
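For quick reference, below is a minimal client-side sketch that exercises the scaffolded endpoints above. The route paths and required admin auth come from bindBackupApi; the base URL, the "/api" group prefix and the way the admin token is passed in the Authorization header are assumptions about a default local setup, not part of this commit.

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

func main() {
    const baseURL = "http://127.0.0.1:8090/api" // assumed default address and api prefix
    const adminToken = "YOUR_ADMIN_AUTH_TOKEN"  // assumed to be accepted as a raw Authorization header value

    // GET /api/backups - list the available backups (admin only)
    listReq, _ := http.NewRequest("GET", baseURL+"/backups", nil)
    listReq.Header.Set("Authorization", adminToken)
    listRes, err := http.DefaultClient.Do(listReq)
    if err != nil {
        panic(err)
    }
    defer listRes.Body.Close()
    body, _ := io.ReadAll(listRes.Body)
    fmt.Println("backups:", string(body))

    // POST /api/backups - trigger a new backup (admin only);
    // an empty name lets the server autogenerate one
    createReq, _ := http.NewRequest("POST", baseURL+"/backups", strings.NewReader(`{"name":""}`))
    createReq.Header.Set("Content-Type", "application/json")
    createReq.Header.Set("Authorization", adminToken)
    createRes, err := http.DefaultClient.Do(createReq)
    if err != nil {
        panic(err)
    }
    defer createRes.Body.Close()
    fmt.Println("create status:", createRes.StatusCode) // 204 on success
}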


@ -113,6 +113,7 @@ func InitApi(app core.App) (*echo.Echo, error) {
bindFileApi(app, api)
bindRealtimeApi(app, api)
bindLogsApi(app, api)
bindBackupApi(app, api)
bindHealthApi(app, api)
// trigger the custom BeforeServe hook for the created api router
@ -191,7 +192,7 @@ func bindStaticAdminUI(app core.App, e *echo.Echo) error {
return nil
}
const totalAdminsCacheKey = "totalAdmins"
const totalAdminsCacheKey = "@totalAdmins"
func updateTotalAdminsCache(app core.App) error {
total, err := app.Dao().TotalAdmins()


@ -1,6 +1,7 @@
package apis
import (
"context"
"crypto/tls"
"log"
"net"
@ -85,7 +86,7 @@ func Serve(app core.App, options *ServeOptions) error {
GetCertificate: certManager.GetCertificate,
NextProtos: []string{acme.ALPNProto},
},
ReadTimeout: 5 * time.Minute,
ReadTimeout: 10 * time.Minute,
ReadHeaderTimeout: 30 * time.Second,
// WriteTimeout: 60 * time.Second, // breaks sse!
Handler: router,
@ -119,6 +120,14 @@ func Serve(app core.App, options *ServeOptions) error {
regular.Printf(" ➜ Admin UI: %s\n", color.CyanString("%s://%s/_/", schema, serverConfig.Addr))
}
// try to gracefully shutdown the server on app termination
app.OnTerminate().Add(func(e *core.TerminateEvent) error {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
serverConfig.Shutdown(ctx)
return nil
})
// start HTTPS server
if options.HttpsAddr != "" {
// if httpAddr is set, start an HTTP server to redirect the traffic to the HTTPS version


@ -4,6 +4,8 @@
package core
import (
"context"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models/settings"
@ -69,12 +71,20 @@ type App interface {
// NewMailClient creates and returns a configured app mail client.
NewMailClient() mailer.Mailer
// NewFilesystem creates and returns a configured filesystem.System instance.
// NewFilesystem creates and returns a configured filesystem.System instance
// for managing regular app files (eg. collection uploads).
//
// NB! Make sure to call `Close()` on the returned result
// after you are done working with it.
NewFilesystem() (*filesystem.System, error)
// NewBackupsFilesystem creates and returns a configured filesystem.System instance
// for managing app backups.
//
// NB! Make sure to call `Close()` on the returned result
// after you are done working with it.
NewBackupsFilesystem() (*filesystem.System, error)
// RefreshSettings reinitializes and reloads the stored application settings.
RefreshSettings() error
@ -92,6 +102,31 @@ type App interface {
// (eg. closing db connections).
ResetBootstrapState() error
// CreateBackup creates a new backup of the current app pb_data directory.
//
// Backups can be stored on S3 if it is configured in app.Settings().Backups.
//
// Please refer to the godoc of the specific core.App implementation
// for details on the backup procedures.
CreateBackup(ctx context.Context, name string) error
// RestoreBackup restores the backup with the specified name and restarts
// the current running application process.
//
// To safely perform the restore it is recommended to have free disk space
// for at least 2x the size of the restored pb_data backup.
//
// Please refer to the godoc of the specific core.App implementation
// for details on the restore procedures.
//
// NB! This feature is experimental and currently is expected to work only on UNIX based systems.
RestoreBackup(ctx context.Context, name string) error
// Restart restarts the current running application process.
//
// Currently it relies on execve, so it is supported only on UNIX based systems.
Restart() error
// ---------------------------------------------------------------
// App event hooks
// ---------------------------------------------------------------
@ -118,6 +153,10 @@ type App interface {
// It could be used to log the final API error in external services.
OnAfterApiError() *hook.Hook[*ApiErrorEvent]
// OnTerminate hook is triggered when the app is in the process
// of being terminated (eg. on SIGTERM signal).
OnTerminate() *hook.Hook[*TerminateEvent]
// ---------------------------------------------------------------
// Dao event hooks
// ---------------------------------------------------------------
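A minimal sketch of how the new App methods and hook might be used from a PocketBase-based program. The pocketbase.New()/Start() setup, the helper name and the 10 minute timeout are illustrative assumptions; the CreateBackup and OnTerminate signatures are the ones declared above.

package main

import (
    "context"
    "log"
    "time"

    "github.com/pocketbase/pocketbase"
    "github.com/pocketbase/pocketbase/core"
)

// backupNow triggers a one-off backup with an autogenerated name (empty name).
func backupNow(app core.App) error {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
    defer cancel()

    return app.CreateBackup(ctx, "")
}

func main() {
    app := pocketbase.New()

    // run custom cleanup while the app process is being terminated
    app.OnTerminate().Add(func(e *core.TerminateEvent) error {
        log.Println("terminating...")
        return nil
    })

    if err := app.Start(); err != nil {
        log.Fatal(err)
    }
}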


@ -7,6 +7,8 @@ import (
"log"
"os"
"path/filepath"
"runtime"
"syscall"
"time"
"github.com/fatih/color"
@ -27,6 +29,10 @@ const (
DefaultDataMaxIdleConns int = 20
DefaultLogsMaxOpenConns int = 10
DefaultLogsMaxIdleConns int = 2
LocalStorageDirName string = "storage"
LocalBackupsDirName string = "backups"
LocalTempDirName string = ".pb_temp_to_delete" // temp pb_data sub directory that will be deleted on each app.Bootstrap()
)
var _ App = (*BaseApp)(nil)
@ -55,6 +61,7 @@ type BaseApp struct {
onBeforeServe *hook.Hook[*ServeEvent]
onBeforeApiError *hook.Hook[*ApiErrorEvent]
onAfterApiError *hook.Hook[*ApiErrorEvent]
onTerminate *hook.Hook[*TerminateEvent]
// dao event hooks
onModelBeforeCreate *hook.Hook[*ModelEvent]
@ -192,6 +199,7 @@ func NewBaseApp(config *BaseAppConfig) *BaseApp {
onBeforeServe: &hook.Hook[*ServeEvent]{},
onBeforeApiError: &hook.Hook[*ApiErrorEvent]{},
onAfterApiError: &hook.Hook[*ApiErrorEvent]{},
onTerminate: &hook.Hook[*TerminateEvent]{},
// dao event hooks
onModelBeforeCreate: &hook.Hook[*ModelEvent]{},
@ -338,6 +346,9 @@ func (app *BaseApp) Bootstrap() error {
// we don't check for an error because the db migrations may have not been executed yet
app.RefreshSettings()
// cleanup the pb_data temp directory (if any)
os.RemoveAll(filepath.Join(app.DataDir(), LocalTempDirName))
if err := app.OnAfterBootstrap().Trigger(event); err != nil && app.IsDebug() {
log.Println(err)
}
@ -471,6 +482,7 @@ func (app *BaseApp) NewMailClient() mailer.Mailer {
}
// NewFilesystem creates a new local or S3 filesystem instance
// for managing regular app files (eg. collection uploads)
// based on the current app settings.
//
// NB! Make sure to call `Close()` on the returned result
@ -488,7 +500,54 @@ func (app *BaseApp) NewFilesystem() (*filesystem.System, error) {
}
// fallback to local filesystem
return filesystem.NewLocal(filepath.Join(app.DataDir(), "storage"))
return filesystem.NewLocal(filepath.Join(app.DataDir(), LocalStorageDirName))
}
// NewBackupsFilesystem creates a new local or S3 filesystem instance
// for managing app backups based on the current app settings.
//
// NB! Make sure to call `Close()` on the returned result
// after you are done working with it.
func (app *BaseApp) NewBackupsFilesystem() (*filesystem.System, error) {
if app.settings != nil && app.settings.Backups.S3.Enabled {
return filesystem.NewS3(
app.settings.Backups.S3.Bucket,
app.settings.Backups.S3.Region,
app.settings.Backups.S3.Endpoint,
app.settings.Backups.S3.AccessKey,
app.settings.Backups.S3.Secret,
app.settings.Backups.S3.ForcePathStyle,
)
}
// fallback to local filesystem
return filesystem.NewLocal(filepath.Join(app.DataDir(), LocalBackupsDirName))
}
// Restart restarts (aka. replaces) the current running application process.
//
// NB! It relies on execve which is supported only on UNIX based systems.
func (app *BaseApp) Restart() error {
if runtime.GOOS == "windows" {
return errors.New("restart is not supported on windows")
}
execPath, err := os.Executable()
if err != nil {
return err
}
// optimistically reset the app bootstrap state
app.ResetBootstrapState()
if err := syscall.Exec(execPath, os.Args, os.Environ()); err != nil {
// restart the app bootstrap state
app.Bootstrap()
return err
}
return nil
}
// RefreshSettings reinitializes and reloads the stored application settings.
@ -541,6 +600,10 @@ func (app *BaseApp) OnAfterApiError() *hook.Hook[*ApiErrorEvent] {
return app.onAfterApiError
}
func (app *BaseApp) OnTerminate() *hook.Hook[*TerminateEvent] {
return app.onTerminate
}
// -------------------------------------------------------------------
// Dao event hooks
// -------------------------------------------------------------------
@ -1084,4 +1147,9 @@ func (app *BaseApp) registerDefaultHooks() {
return nil
})
app.OnTerminate().Add(func(e *TerminateEvent) error {
app.ResetBootstrapState()
return nil
})
}

core/base_backup.go (new file, 248 lines)

@ -0,0 +1,248 @@
package core
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"time"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/tools/archive"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/security"
"github.com/spf13/cast"
)
const CacheActiveBackupsKey string = "@activeBackup"
// CreateBackup creates a new backup of the current app pb_data directory.
//
// If name is empty, it will be autogenerated.
// If a backup with the same name already exists, the new backup file will replace it.
//
// The backup is executed within a transaction, meaning that new writes
// will be temporarily "blocked" until the backup file is generated.
//
// By default backups are stored in pb_data/backups
// (the backups directory itself is excluded from the generated backup).
//
// When using S3 storage for the uploaded collection files, you have to
// back them up manually since they are not part of pb_data.
//
// Backups can be stored on S3 if it is configured in app.Settings().Backups.
func (app *BaseApp) CreateBackup(ctx context.Context, name string) error {
hasActiveBackup := cast.ToString(app.Cache().Get(CacheActiveBackupsKey)) != ""
if hasActiveBackup {
return errors.New("try again later - another backup/restore process has already been started")
}
// auto generate backup name
if name == "" {
name = fmt.Sprintf(
"pb_backup_%s.zip",
time.Now().UTC().Format("20060102150405"),
)
}
app.Cache().Set(CacheActiveBackupsKey, name)
defer app.Cache().Remove(CacheActiveBackupsKey)
// Archive pb_data under the OS temp directory, excluding the "backups" dir itself (if it exists).
//
// Run in a transaction to temporarily block other writes (transactions use the NonconcurrentDB connection).
// ---
tempPath := filepath.Join(os.TempDir(), "pb_backup_"+security.PseudorandomString(4))
createErr := app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
if err := archive.Create(app.DataDir(), tempPath, LocalBackupsDirName); err != nil {
return err
}
return nil
})
if createErr != nil {
return createErr
}
defer os.Remove(tempPath)
// Persist the backup in the backups filesystem.
// ---
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
file, err := filesystem.NewFileFromPath(tempPath)
if err != nil {
return err
}
file.OriginalName = name
file.Name = file.OriginalName
if err := fsys.UploadFile(file, file.Name); err != nil {
return err
}
return nil
}
// RestoreBackup restores the backup with the specified name and restarts
// the current running application process.
//
// NB! This feature is experimental and currently is expected to work only on UNIX based systems.
//
// To safely perform the restore it is recommended to have free disk space
// for at least 2x the size of the restored pb_data backup.
//
// The performed steps are:
//
// 1. Download the backup with the specified name to a temp location
// (this is in case of S3; otherwise it creates a temp copy of the zip).
//
// 2. Extract the backup in a temp directory next to the app "pb_data"
// (eg. "pb_data/../pb_restore_X").
//
// 3. Move the current app "pb_data" under a special temp sub dir that
// will be deleted on the next app start up (eg. "pb_restore_X/.pb_temp_to_delete/").
// This is because on some operating systems it may not be allowed
// to delete the currently open "pb_data" files.
//
// 4. Rename the extracted dir from step 2 as the new "pb_data".
//
// 5. Move any local backups that may have been created previously
// from the old "pb_data" to the new "pb_data/backups".
//
// 6. Restart the app (on successful app bootstrap it will also remove the old pb_data).
//
// If a failure occurs during the restore process, the dir changes are reverted.
// If for whatever reason the revert is not possible, it panics.
func (app *BaseApp) RestoreBackup(ctx context.Context, name string) error {
if runtime.GOOS == "windows" {
return errors.New("restore is not supported on windows")
}
hasActiveBackup := cast.ToString(app.Cache().Get(CacheActiveBackupsKey)) != ""
if hasActiveBackup {
return errors.New("try again later - another backup/restore process has already been started")
}
app.Cache().Set(CacheActiveBackupsKey, name)
defer app.Cache().Remove(CacheActiveBackupsKey)
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
// fetch the backup file in a temp location
br, err := fsys.GetFile(name)
if err != nil {
return err
}
defer br.Close()
tempZip, err := os.CreateTemp(os.TempDir(), "pb_restore")
if err != nil {
return err
}
defer os.Remove(tempZip.Name())
if _, err := io.Copy(tempZip, br); err != nil {
return err
}
parentDataDir := filepath.Dir(app.DataDir())
extractedDataDir := filepath.Join(parentDataDir, "pb_restore_"+security.PseudorandomString(4))
defer os.RemoveAll(extractedDataDir)
if err := archive.Extract(tempZip.Name(), extractedDataDir); err != nil {
return err
}
// ensure that a database file exists
extractedDB := filepath.Join(extractedDataDir, "data.db")
if _, err := os.Stat(extractedDB); err != nil {
return fmt.Errorf("data.db file is missing or invalid: %w", err)
}
// remove the extracted zip file since we no longer need it
// (this is in case the app restarts and the defer calls are not called)
if err := os.Remove(tempZip.Name()); err != nil && app.IsDebug() {
log.Println(err)
}
// make sure that a special temp directory exists in the extracted one
if err := os.MkdirAll(filepath.Join(extractedDataDir, LocalTempDirName), os.ModePerm); err != nil {
return fmt.Errorf("failed to create a temp dir: %w", err)
}
// move the current pb_data to a special temp location that will
// hold the old data while the dirs are being swapped
// (the temp dir will be automatically removed on the next app start)
oldTempDataDir := filepath.Join(extractedDataDir, LocalTempDirName, "old_pb_data")
if err := os.Rename(app.DataDir(), oldTempDataDir); err != nil {
return fmt.Errorf("failed to move the current pb_data to a temp location: %w", err)
}
// "restore", aka. set the extracted backup as the new pb_data directory
if err := os.Rename(extractedDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to set the extracted backup as pb_data dir: %w", err)
}
// update the old temp data dir path after the restore
oldTempDataDir = filepath.Join(app.DataDir(), LocalTempDirName, "old_pb_data")
oldLocalBackupsDir := filepath.Join(oldTempDataDir, LocalBackupsDirName)
newLocalBackupsDir := filepath.Join(app.DataDir(), LocalBackupsDirName)
revertDataDirChanges := func(revertLocalBackupsDir bool) error {
if revertLocalBackupsDir {
if _, err := os.Stat(newLocalBackupsDir); err == nil {
if err := os.Rename(newLocalBackupsDir, oldLocalBackupsDir); err != nil {
return fmt.Errorf("failed to revert the backups dir change: %w", err)
}
}
}
if err := os.Rename(app.DataDir(), extractedDataDir); err != nil {
return fmt.Errorf("failed to revert the extracted dir change: %w", err)
}
// note: after the above rename the moved old data lives under the extracted dir again,
// so use that path as the source when restoring it as pb_data
if err := os.Rename(filepath.Join(extractedDataDir, LocalTempDirName, "old_pb_data"), app.DataDir()); err != nil {
return fmt.Errorf("failed to revert old pb_data dir change: %w", err)
}
return nil
}
// restore the local pb_data/backups dir (if any)
if _, err := os.Stat(oldLocalBackupsDir); err == nil {
if err := os.Rename(oldLocalBackupsDir, newLocalBackupsDir); err != nil {
if err := revertDataDirChanges(true); err != nil && app.IsDebug() {
log.Println(err)
}
return fmt.Errorf("failed to move the local pb_data/backups dir: %w", err)
}
}
// restart the app
if err := app.Restart(); err != nil {
if err := revertDataDirChanges(false); err != nil {
panic(err)
}
return fmt.Errorf("failed to restart the app process: %w", err)
}
return nil
}


@ -236,3 +236,33 @@ func TestBaseAppNewFilesystem(t *testing.T) {
t.Fatalf("Expected nil s3 filesystem, got %v", s3)
}
}
func TestBaseAppNewBackupsFilesystem(t *testing.T) {
const testDataDir = "./pb_base_app_test_data_dir/"
defer os.RemoveAll(testDataDir)
app := NewBaseApp(&BaseAppConfig{
DataDir: testDataDir,
EncryptionEnv: "pb_test_env",
IsDebug: false,
})
// local
local, localErr := app.NewBackupsFilesystem()
if localErr != nil {
t.Fatal(localErr)
}
if local == nil {
t.Fatal("Expected local backups filesystem instance, got nil")
}
// misconfigured s3
app.Settings().Backups.S3.Enabled = true
s3, s3Err := app.NewBackupsFilesystem()
if s3Err == nil {
t.Fatal("Expected S3 error, got nil")
}
if s3 != nil {
t.Fatalf("Expected nil s3 backups filesystem, got %v", s3)
}
}


@ -65,6 +65,10 @@ type BootstrapEvent struct {
App App
}
type TerminateEvent struct {
App App
}
type ServeEvent struct {
App App
Router *echo.Echo

forms/backup_create.go (new file, 76 lines)

@ -0,0 +1,76 @@
package forms
import (
"context"
"regexp"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/pocketbase/pocketbase/core"
)
var backupNameRegex = regexp.MustCompile(`^[a-z0-9_-]+\.zip$`)
// BackupCreate is a request form for creating a new app backup.
type BackupCreate struct {
app core.App
ctx context.Context
Name string `form:"name" json:"name"`
}
// NewBackupCreate creates a new BackupCreate request form.
func NewBackupCreate(app core.App) *BackupCreate {
return &BackupCreate{
app: app,
ctx: context.Background(),
}
}
// SetContext replaces the default form context with the provided one.
func (form *BackupCreate) SetContext(ctx context.Context) {
form.ctx = ctx
}
// Validate makes the form validatable by implementing [validation.Validatable] interface.
func (form *BackupCreate) Validate() error {
return validation.ValidateStruct(form,
validation.Field(
&form.Name,
validation.Length(1, 100),
validation.Match(backupNameRegex),
validation.By(form.checkUniqueName),
),
)
}
func (form *BackupCreate) checkUniqueName(value any) error {
v, _ := value.(string)
fsys, err := form.app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(form.ctx)
if exists, err := fsys.Exists(v); err != nil || exists {
return validation.NewError("validation_backup_name_exists", "The backup file name is invalid or already exists.")
}
return nil
}
// Submit validates the form and creates the app backup.
//
// You can optionally provide a list of InterceptorFunc to further
// modify the form behavior before creating the backup.
func (form *BackupCreate) Submit(interceptors ...InterceptorFunc[string]) error {
if err := form.Validate(); err != nil {
return err
}
return runInterceptors(form.Name, func(name string) error {
return form.app.CreateBackup(form.ctx, name)
}, interceptors...)
}
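A minimal programmatic usage sketch for the form, outside of the API handler shown earlier. The backup name and the timeout are illustrative; NewBackupCreate, SetContext and Submit are the methods defined above.

package example

import (
    "context"
    "log"
    "time"

    "github.com/pocketbase/pocketbase/core"
    "github.com/pocketbase/pocketbase/forms"
)

// createManualBackup creates a named backup through the BackupCreate form.
func createManualBackup(app core.App) {
    form := forms.NewBackupCreate(app)
    form.Name = "manual_snapshot.zip" // must match backupNameRegex (^[a-z0-9_-]+\.zip$)

    // bound the backup duration with a custom context
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
    defer cancel()
    form.SetContext(ctx)

    if err := form.Submit(); err != nil {
        log.Println("failed to create backup:", err)
    }
}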


@ -28,6 +28,9 @@ type Settings struct {
Smtp SmtpConfig `form:"smtp" json:"smtp"`
S3 S3Config `form:"s3" json:"s3"`
// @todo update tests
Backups BackupsConfig `form:"backups" json:"backups"`
AdminAuthToken TokenConfig `form:"adminAuthToken" json:"adminAuthToken"`
AdminPasswordResetToken TokenConfig `form:"adminPasswordResetToken" json:"adminPasswordResetToken"`
AdminFileToken TokenConfig `form:"adminFileToken" json:"adminFileToken"`
@ -94,7 +97,7 @@ func New() *Settings {
},
AdminFileToken: TokenConfig{
Secret: security.RandomString(50),
Duration: 300, // 5 minutes
Duration: 120, // 2 minutes
},
RecordAuthToken: TokenConfig{
Secret: security.RandomString(50),
@ -110,7 +113,7 @@ func New() *Settings {
},
RecordFileToken: TokenConfig{
Secret: security.RandomString(50),
Duration: 300, // 5 minutes
Duration: 120, // 2 minutes
},
RecordEmailChangeToken: TokenConfig{
Secret: security.RandomString(50),
@ -393,6 +396,33 @@ func (c S3Config) Validate() error {
// -------------------------------------------------------------------
type BackupsConfig struct {
AutoInterval BackupInterval `form:"autoInterval" json:"autoInterval"`
AutoMaxRetention int `form:"autoMaxRetention" json:"autoMaxRetention"`
S3 S3Config `form:"s3" json:"s3"`
}
// Validate makes BackupsConfig validatable by implementing [validation.Validatable] interface.
func (c BackupsConfig) Validate() error {
return validation.ValidateStruct(&c,
validation.Field(&c.S3),
)
}
// @todo
type BackupInterval struct {
Day int
}
// Validate makes BackupInterval validatable by implementing [validation.Validatable] interface.
func (c BackupInterval) Validate() error {
return validation.ValidateStruct(&c,
validation.Field(&c.Day),
)
}
// -------------------------------------------------------------------
type MetaConfig struct {
AppName string `form:"appName" json:"appName"`
AppUrl string `form:"appUrl" json:"appUrl"`
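The new Backups settings can be toggled programmatically the same way the TestBaseAppNewBackupsFilesystem test in this commit does (in-memory only). The bucket/region/credential values below are placeholders; AutoInterval is still marked @todo and its semantics are not defined here, so it is left untouched.

package example

import (
    "github.com/pocketbase/pocketbase/core"
)

// enableS3Backups switches the backups filesystem from the local
// pb_data/backups dir to S3 (persisting the settings is out of scope here).
func enableS3Backups(app core.App) {
    backups := &app.Settings().Backups

    backups.S3.Enabled = true
    backups.S3.Bucket = "my-backups-bucket"
    backups.S3.Region = "eu-central-1"
    backups.S3.Endpoint = "https://s3.example.com"
    backups.S3.AccessKey = "S3_ACCESS_KEY"
    backups.S3.Secret = "S3_SECRET"
    backups.S3.ForcePathStyle = false
}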


@ -156,7 +156,7 @@ func (pb *PocketBase) Execute() error {
done := make(chan bool, 1)
// wait for interrupt signal to gracefully shutdown the application
// listen for interrupt signal to gracefully shutdown the application
go func() {
sigch := make(chan os.Signal, 1)
signal.Notify(sigch, os.Interrupt, syscall.SIGTERM)
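The hunk above only shows the reworded comment and the existing signal registration; the part of Execute() that actually fires the new OnTerminate hook is not included in this diff. The wiring is presumably roughly equivalent to the following standalone sketch (an assumption, not code from this commit):

package example

import (
    "os"
    "os/signal"
    "syscall"

    "github.com/pocketbase/pocketbase"
    "github.com/pocketbase/pocketbase/core"
)

// waitForTermination blocks until an interrupt/SIGTERM arrives and then
// fires OnTerminate so the registered handlers (graceful http shutdown,
// bootstrap state reset, ...) can run before the process exits.
func waitForTermination(pb *pocketbase.PocketBase) {
    sigch := make(chan os.Signal, 1)
    signal.Notify(sigch, os.Interrupt, syscall.SIGTERM)
    <-sigch

    pb.OnTerminate().Trigger(&core.TerminateEvent{App: pb})
}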


@ -5,10 +5,19 @@ import (
"io"
"io/fs"
"os"
"path/filepath"
"strings"
)
// Create creates a new zip archive from src dir content and saves it in dest path.
func Create(src, dest string) error {
//
// You can specify skipPaths to skip/ignore certain directories and files (relative to src)
// preventing adding them in the final archive.
func Create(src string, dest string, skipPaths ...string) error {
if err := os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil {
return err
}
zf, err := os.Create(dest)
if err != nil {
return err
@ -18,8 +27,8 @@ func Create(src, dest string) error {
zw := zip.NewWriter(zf)
defer zw.Close()
if err := zipAddFS(zw, os.DirFS(src)); err != nil {
// try to cleanup the created zip file
if err := zipAddFS(zw, os.DirFS(src), skipPaths...); err != nil {
// try to cleanup at least the created zip file
os.Remove(dest)
return err
@ -29,7 +38,7 @@ func Create(src, dest string) error {
}
// note remove after similar method is added in the std lib (https://github.com/golang/go/issues/54898)
func zipAddFS(w *zip.Writer, fsys fs.FS) error {
func zipAddFS(w *zip.Writer, fsys fs.FS, skipPaths ...string) error {
return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
if err != nil {
return err
@ -39,6 +48,14 @@ func zipAddFS(w *zip.Writer, fsys fs.FS) error {
return nil
}
// skip
for _, ignore := range skipPaths {
if ignore == name ||
strings.HasPrefix(name+string(os.PathSeparator), filepath.Clean(ignore)+string(os.PathSeparator)) {
return nil
}
}
info, err := d.Info()
if err != nil {
return err
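The updated archive.Create above now accepts optional skip paths (relative to src). A minimal sketch of both helpers in use; the directory and zip paths are illustrative:

package example

import (
    "log"
    "os"
    "path/filepath"

    "github.com/pocketbase/pocketbase/tools/archive"
)

// snapshotDataDir zips dataDir (skipping the backups dir and the temp
// leftovers) and then extracts it back into a sibling directory.
func snapshotDataDir(dataDir string) {
    zipPath := filepath.Join(os.TempDir(), "pb_data_snapshot.zip")

    if err := archive.Create(dataDir, zipPath, "backups", ".pb_temp_to_delete"); err != nil {
        log.Fatal(err)
    }

    if err := archive.Extract(zipPath, dataDir+"_restored"); err != nil {
        log.Fatal(err)
    }
}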


@ -34,8 +34,8 @@ func TestCreateSuccess(t *testing.T) {
zipPath := filepath.Join(os.TempDir(), zipName)
defer os.RemoveAll(zipPath)
// zip testDir content
if err := archive.Create(testDir, zipPath); err != nil {
// zip testDir content (excluding test and a/b/c dir)
if err := archive.Create(testDir, zipPath, "a/b/c", "test"); err != nil {
t.Fatalf("Failed to create archive: %v", err)
}
@ -48,7 +48,7 @@ func TestCreateSuccess(t *testing.T) {
t.Fatalf("Expected zip with name %q, got %q", zipName, name)
}
expectedSize := int64(300)
expectedSize := int64(405)
if size := info.Size(); size != expectedSize {
t.Fatalf("Expected zip with size %d, got %d", expectedSize, size)
}
@ -68,17 +68,53 @@ func createTestDir(t *testing.T) string {
t.Fatal(err)
}
sub1, err := os.OpenFile(filepath.Join(dir, "a/sub1.txt"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
{
f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
sub1.Close()
sub2, err := os.OpenFile(filepath.Join(dir, "a/b/c/sub2.txt"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
{
f, err := os.OpenFile(filepath.Join(dir, "test2"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/test"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/b/sub1"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/b/c/sub2"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/b/c/sub3"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
sub2.Close()
return dir
}


@ -1,6 +1,7 @@
package archive_test
import (
"io/fs"
"os"
"path/filepath"
"testing"
@ -13,15 +14,15 @@ func TestExtractFailure(t *testing.T) {
defer os.RemoveAll(testDir)
missingZipPath := filepath.Join(os.TempDir(), "pb_missing_test.zip")
extractPath := filepath.Join(os.TempDir(), "pb_zip_extract")
defer os.RemoveAll(extractPath)
extractedPath := filepath.Join(os.TempDir(), "pb_zip_extract")
defer os.RemoveAll(extractedPath)
if err := archive.Extract(missingZipPath, extractPath); err == nil {
if err := archive.Extract(missingZipPath, extractedPath); err == nil {
t.Fatal("Expected Extract to fail due to missing zipPath")
}
if _, err := os.Stat(extractPath); err == nil {
t.Fatalf("Expected %q to not be created", extractPath)
if _, err := os.Stat(extractedPath); err == nil {
t.Fatalf("Expected %q to not be created", extractedPath)
}
}
@ -32,26 +33,55 @@ func TestExtractSuccess(t *testing.T) {
zipPath := filepath.Join(os.TempDir(), "pb_test.zip")
defer os.RemoveAll(zipPath)
extractPath := filepath.Join(os.TempDir(), "pb_zip_extract")
defer os.RemoveAll(extractPath)
extractedPath := filepath.Join(os.TempDir(), "pb_zip_extract")
defer os.RemoveAll(extractedPath)
// zip testDir content
if err := archive.Create(testDir, zipPath); err != nil {
// zip testDir content (with exclude)
if err := archive.Create(testDir, zipPath, "a/b/c", "test", "sub2"); err != nil {
t.Fatalf("Failed to create archive: %v", err)
}
if err := archive.Extract(zipPath, extractPath); err != nil {
t.Fatalf("Failed to extract %q in %q", zipPath, extractPath)
if err := archive.Extract(zipPath, extractedPath); err != nil {
t.Fatalf("Failed to extract %q in %q", zipPath, extractedPath)
}
pathsToCheck := []string{
filepath.Join(extractPath, "a/sub1.txt"),
filepath.Join(extractPath, "a/b/c/sub2.txt"),
}
availableFiles := []string{}
for _, p := range pathsToCheck {
if _, err := os.Stat(p); err != nil {
t.Fatalf("Failed to retrieve extracted file %q: %v", p, err)
walkErr := filepath.WalkDir(extractedPath, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
availableFiles = append(availableFiles, path)
return nil
})
if walkErr != nil {
t.Fatalf("Failed to read the extracted dir: %v", walkErr)
}
expectedFiles := []string{
filepath.Join(extractedPath, "test2"),
filepath.Join(extractedPath, "a/test"),
filepath.Join(extractedPath, "a/b/sub1"),
}
if len(availableFiles) != len(expectedFiles) {
t.Fatalf("Expected \n%v, \ngot \n%v", expectedFiles, availableFiles)
}
ExpectedLoop:
for _, expected := range expectedFiles {
for _, available := range availableFiles {
if available == expected {
continue ExpectedLoop
}
}
t.Fatalf("Missing file %q in \n%v", expected, availableFiles)
}
}


@ -82,6 +82,12 @@ func NewLocal(dirPath string) (*System, error) {
return &System{ctx: ctx, bucket: bucket}, nil
}
// @todo add test
// SetContext assigns the specified context to the current filesystem.
func (s *System) SetContext(ctx context.Context) {
s.ctx = ctx
}
// Close releases any resources used for the related filesystem.
func (s *System) Close() error {
return s.bucket.Close()
@ -109,6 +115,28 @@ func (s *System) GetFile(fileKey string) (*blob.Reader, error) {
return br, nil
}
// List returns a flat list with info for all files under the specified prefix.
func (s *System) List(prefix string) ([]*blob.ListObject, error) {
files := []*blob.ListObject{}
iter := s.bucket.List(&blob.ListOptions{
Prefix: prefix,
})
for {
obj, err := iter.Next(s.ctx)
if err != nil {
if err != io.EOF {
return nil, err
}
break
}
files = append(files, obj)
}
return files, nil
}
// Upload writes content into the fileKey location.
func (s *System) Upload(content []byte, fileKey string) error {
opts := &blob.WriterOptions{
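A minimal sketch combining the new SetContext and List helpers with the backups filesystem from earlier in this commit (the 30 second timeout is illustrative):

package example

import (
    "context"
    "fmt"
    "time"

    "github.com/pocketbase/pocketbase/core"
)

// printBackups lists all files stored in the app backups filesystem.
func printBackups(app core.App) error {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    fsys, err := app.NewBackupsFilesystem()
    if err != nil {
        return err
    }
    defer fsys.Close()

    fsys.SetContext(ctx)

    objects, err := fsys.List("") // empty prefix -> everything
    if err != nil {
        return err
    }

    for _, obj := range objects {
        fmt.Printf("%s (%d bytes, modified %s)\n", obj.Key, obj.Size, obj.ModTime)
    }

    return nil
}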


@ -401,6 +401,67 @@ func TestFileSystemGetFile(t *testing.T) {
}
}
func TestFileSystemList(t *testing.T) {
dir := createTestDir(t)
defer os.RemoveAll(dir)
fs, err := filesystem.NewLocal(dir)
if err != nil {
t.Fatal(err)
}
defer fs.Close()
scenarios := []struct {
prefix string
expected []string
}{
{
"",
[]string{
"image.png",
"image.svg",
"image_! noext",
"style.css",
"test/sub1.txt",
"test/sub2.txt",
},
},
{
"test",
[]string{
"test/sub1.txt",
"test/sub2.txt",
},
},
{
"missing",
[]string{},
},
}
for _, s := range scenarios {
objs, err := fs.List(s.prefix)
if err != nil {
t.Fatalf("[%s] %v", s.prefix, err)
}
if len(s.expected) != len(objs) {
t.Fatalf("[%s] Expected %d files, got \n%v", s.prefix, len(s.expected), objs)
}
ObjsLoop:
for _, obj := range objs {
for _, name := range s.expected {
if name == obj.Key {
continue ObjsLoop
}
}
t.Fatalf("[%s] Unexpected file %q", s.prefix, obj.Key)
}
}
}
func TestFileSystemServeSingleRange(t *testing.T) {
dir := createTestDir(t)
defer os.RemoveAll(dir)