diff --git a/plugins/jsvm/internal/types/generated/types.d.ts b/plugins/jsvm/internal/types/generated/types.d.ts index b02be9d0..e8ed746b 100644 --- a/plugins/jsvm/internal/types/generated/types.d.ts +++ b/plugins/jsvm/internal/types/generated/types.d.ts @@ -1,4 +1,4 @@ -// 1727605671 +// 1727706940 // GENERATED CODE - DO NOT MODIFY BY HAND // ------------------------------------------------------------------- @@ -1762,8 +1762,8 @@ namespace os { * than ReadFrom. This is used to permit ReadFrom to call io.Copy * without leading to a recursive call to ReadFrom. */ - type _subrrRcn = noReadFrom&File - interface fileWithoutReadFrom extends _subrrRcn { + type _subzVvuL = noReadFrom&File + interface fileWithoutReadFrom extends _subzVvuL { } interface File { /** @@ -1807,8 +1807,8 @@ namespace os { * than WriteTo. This is used to permit WriteTo to call io.Copy * without leading to a recursive call to WriteTo. */ - type _subSJIke = noWriteTo&File - interface fileWithoutWriteTo extends _subSJIke { + type _subIhiTs = noWriteTo&File + interface fileWithoutWriteTo extends _subIhiTs { } interface File { /** @@ -2452,8 +2452,8 @@ namespace os { * * The methods of File are safe for concurrent use. */ - type _subcHuOE = file - interface File extends _subcHuOE { + type _subXEFTG = file + interface File extends _subXEFTG { } /** * A FileInfo describes a file and is returned by [Stat] and [Lstat]. @@ -2845,132 +2845,6 @@ namespace filepath { } } -/** - * Package exec runs external commands. It wraps os.StartProcess to make it - * easier to remap stdin and stdout, connect I/O with pipes, and do other - * adjustments. - * - * Unlike the "system" library call from C and other languages, the - * os/exec package intentionally does not invoke the system shell and - * does not expand any glob patterns or handle other expansions, - * pipelines, or redirections typically done by shells. The package - * behaves more like C's "exec" family of functions. To expand glob - * patterns, either call the shell directly, taking care to escape any - * dangerous input, or use the [path/filepath] package's Glob function. - * To expand environment variables, use package os's ExpandEnv. - * - * Note that the examples in this package assume a Unix system. - * They may not run on Windows, and they do not run in the Go Playground - * used by golang.org and godoc.org. - * - * # Executables in the current directory - * - * The functions [Command] and [LookPath] look for a program - * in the directories listed in the current path, following the - * conventions of the host operating system. - * Operating systems have for decades included the current - * directory in this search, sometimes implicitly and sometimes - * configured explicitly that way by default. - * Modern practice is that including the current directory - * is usually unexpected and often leads to security problems. - * - * To avoid those security problems, as of Go 1.19, this package will not resolve a program - * using an implicit or explicit path entry relative to the current directory. - * That is, if you run [LookPath]("go"), it will not successfully return - * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. - * Instead, if the usual path algorithms would result in that answer, - * these functions return an error err satisfying [errors.Is](err, [ErrDot]). 
- * - * For example, consider these two program snippets: - * - * ``` - * path, err := exec.LookPath("prog") - * if err != nil { - * log.Fatal(err) - * } - * use(path) - * ``` - * - * and - * - * ``` - * cmd := exec.Command("prog") - * if err := cmd.Run(); err != nil { - * log.Fatal(err) - * } - * ``` - * - * These will not find and run ./prog or .\prog.exe, - * no matter how the current path is configured. - * - * Code that always wants to run a program from the current directory - * can be rewritten to say "./prog" instead of "prog". - * - * Code that insists on including results from relative path entries - * can instead override the error using an errors.Is check: - * - * ``` - * path, err := exec.LookPath("prog") - * if errors.Is(err, exec.ErrDot) { - * err = nil - * } - * if err != nil { - * log.Fatal(err) - * } - * use(path) - * ``` - * - * and - * - * ``` - * cmd := exec.Command("prog") - * if errors.Is(cmd.Err, exec.ErrDot) { - * cmd.Err = nil - * } - * if err := cmd.Run(); err != nil { - * log.Fatal(err) - * } - * ``` - * - * Setting the environment variable GODEBUG=execerrdot=0 - * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19 - * behavior for programs that are unable to apply more targeted fixes. - * A future version of Go may remove support for this variable. - * - * Before adding such overrides, make sure you understand the - * security implications of doing so. - * See https://go.dev/blog/path-security for more information. - */ -namespace exec { - interface command { - /** - * Command returns the [Cmd] struct to execute the named program with - * the given arguments. - * - * It sets only the Path and Args in the returned structure. - * - * If name contains no path separators, Command uses [LookPath] to - * resolve name to a complete path if possible. Otherwise it uses name - * directly as Path. - * - * The returned Cmd's Args field is constructed from the command name - * followed by the elements of arg, so arg should not include the - * command name itself. For example, Command("echo", "hello"). - * Args[0] is always name, not the possibly resolved Path. - * - * On Windows, processes receive the whole command line as a single string - * and do their own parsing. Command combines and quotes Args into a command - * line string with an algorithm compatible with applications using - * CommandLineToArgvW (which is the most common way). Notable exceptions are - * msiexec.exe and cmd.exe (and thus, all batch files), which have a different - * unquoting algorithm. In these or other similar cases, you can do the - * quoting yourself and provide the full command line in SysProcAttr.CmdLine, - * leaving Args empty. - */ - (name: string, ...arg: string[]): (Cmd) - } -} - /** * Package validation provides configurable and extensible rules for validating data of various types. */ @@ -3325,14 +3199,14 @@ namespace dbx { /** * MssqlBuilder is the builder for SQL Server databases. */ - type _subtdfax = BaseBuilder - interface MssqlBuilder extends _subtdfax { + type _subfHKiD = BaseBuilder + interface MssqlBuilder extends _subfHKiD { } /** * MssqlQueryBuilder is the query builder for SQL Server databases. */ - type _sublQypF = BaseQueryBuilder - interface MssqlQueryBuilder extends _sublQypF { + type _subaJyIt = BaseQueryBuilder + interface MssqlQueryBuilder extends _subaJyIt { } interface newMssqlBuilder { /** @@ -3403,8 +3277,8 @@ namespace dbx { /** * MysqlBuilder is the builder for MySQL databases. 
*/ - type _subtKrsG = BaseBuilder - interface MysqlBuilder extends _subtKrsG { + type _subrmOIN = BaseBuilder + interface MysqlBuilder extends _subrmOIN { } interface newMysqlBuilder { /** @@ -3479,14 +3353,14 @@ namespace dbx { /** * OciBuilder is the builder for Oracle databases. */ - type _subTevke = BaseBuilder - interface OciBuilder extends _subTevke { + type _subsqtur = BaseBuilder + interface OciBuilder extends _subsqtur { } /** * OciQueryBuilder is the query builder for Oracle databases. */ - type _subFRAPn = BaseQueryBuilder - interface OciQueryBuilder extends _subFRAPn { + type _subAZCwZ = BaseQueryBuilder + interface OciQueryBuilder extends _subAZCwZ { } interface newOciBuilder { /** @@ -3549,8 +3423,8 @@ namespace dbx { /** * PgsqlBuilder is the builder for PostgreSQL databases. */ - type _subKKaBH = BaseBuilder - interface PgsqlBuilder extends _subKKaBH { + type _subPkaII = BaseBuilder + interface PgsqlBuilder extends _subPkaII { } interface newPgsqlBuilder { /** @@ -3617,8 +3491,8 @@ namespace dbx { /** * SqliteBuilder is the builder for SQLite databases. */ - type _subRyJlz = BaseBuilder - interface SqliteBuilder extends _subRyJlz { + type _subsrUsp = BaseBuilder + interface SqliteBuilder extends _subsrUsp { } interface newSqliteBuilder { /** @@ -3717,8 +3591,8 @@ namespace dbx { /** * StandardBuilder is the builder that is used by DB for an unknown driver. */ - type _subkIcuU = BaseBuilder - interface StandardBuilder extends _subkIcuU { + type _subQGlCO = BaseBuilder + interface StandardBuilder extends _subQGlCO { } interface newStandardBuilder { /** @@ -3784,8 +3658,8 @@ namespace dbx { * DB enhances sql.DB by providing a set of DB-agnostic query building methods. * DB allows easier query building and population of data into Go variables. */ - type _subExmlQ = Builder - interface DB extends _subExmlQ { + type _subopDPV = Builder + interface DB extends _subopDPV { /** * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc. */ @@ -4589,8 +4463,8 @@ namespace dbx { * Rows enhances sql.Rows by providing additional data query methods. * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row. */ - type _subSZGyU = sql.Rows - interface Rows extends _subSZGyU { + type _subwmitS = sql.Rows + interface Rows extends _subwmitS { } interface Rows { /** @@ -4948,8 +4822,8 @@ namespace dbx { }): string } interface structInfo { } - type _subOCBls = structInfo - interface structValue extends _subOCBls { + type _subawPKv = structInfo + interface structValue extends _subawPKv { } interface fieldInfo { } @@ -4988,8 +4862,8 @@ namespace dbx { /** * Tx enhances sql.Tx with additional querying methods. */ - type _subWmBdO = Builder - interface Tx extends _subWmBdO { + type _subdAhcn = Builder + interface Tx extends _subdAhcn { } interface Tx { /** @@ -5005,6 +4879,132 @@ namespace dbx { } } +/** + * Package exec runs external commands. It wraps os.StartProcess to make it + * easier to remap stdin and stdout, connect I/O with pipes, and do other + * adjustments. + * + * Unlike the "system" library call from C and other languages, the + * os/exec package intentionally does not invoke the system shell and + * does not expand any glob patterns or handle other expansions, + * pipelines, or redirections typically done by shells. The package + * behaves more like C's "exec" family of functions. 
To expand glob + * patterns, either call the shell directly, taking care to escape any + * dangerous input, or use the [path/filepath] package's Glob function. + * To expand environment variables, use package os's ExpandEnv. + * + * Note that the examples in this package assume a Unix system. + * They may not run on Windows, and they do not run in the Go Playground + * used by golang.org and godoc.org. + * + * # Executables in the current directory + * + * The functions [Command] and [LookPath] look for a program + * in the directories listed in the current path, following the + * conventions of the host operating system. + * Operating systems have for decades included the current + * directory in this search, sometimes implicitly and sometimes + * configured explicitly that way by default. + * Modern practice is that including the current directory + * is usually unexpected and often leads to security problems. + * + * To avoid those security problems, as of Go 1.19, this package will not resolve a program + * using an implicit or explicit path entry relative to the current directory. + * That is, if you run [LookPath]("go"), it will not successfully return + * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. + * Instead, if the usual path algorithms would result in that answer, + * these functions return an error err satisfying [errors.Is](err, [ErrDot]). + * + * For example, consider these two program snippets: + * + * ``` + * path, err := exec.LookPath("prog") + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * ``` + * + * and + * + * ``` + * cmd := exec.Command("prog") + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * ``` + * + * These will not find and run ./prog or .\prog.exe, + * no matter how the current path is configured. + * + * Code that always wants to run a program from the current directory + * can be rewritten to say "./prog" instead of "prog". + * + * Code that insists on including results from relative path entries + * can instead override the error using an errors.Is check: + * + * ``` + * path, err := exec.LookPath("prog") + * if errors.Is(err, exec.ErrDot) { + * err = nil + * } + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * ``` + * + * and + * + * ``` + * cmd := exec.Command("prog") + * if errors.Is(cmd.Err, exec.ErrDot) { + * cmd.Err = nil + * } + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * ``` + * + * Setting the environment variable GODEBUG=execerrdot=0 + * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19 + * behavior for programs that are unable to apply more targeted fixes. + * A future version of Go may remove support for this variable. + * + * Before adding such overrides, make sure you understand the + * security implications of doing so. + * See https://go.dev/blog/path-security for more information. + */ +namespace exec { + interface command { + /** + * Command returns the [Cmd] struct to execute the named program with + * the given arguments. + * + * It sets only the Path and Args in the returned structure. + * + * If name contains no path separators, Command uses [LookPath] to + * resolve name to a complete path if possible. Otherwise it uses name + * directly as Path. + * + * The returned Cmd's Args field is constructed from the command name + * followed by the elements of arg, so arg should not include the + * command name itself. For example, Command("echo", "hello"). + * Args[0] is always name, not the possibly resolved Path. 
+ * + * On Windows, processes receive the whole command line as a single string + * and do their own parsing. Command combines and quotes Args into a command + * line string with an algorithm compatible with applications using + * CommandLineToArgvW (which is the most common way). Notable exceptions are + * msiexec.exe and cmd.exe (and thus, all batch files), which have a different + * unquoting algorithm. In these or other similar cases, you can do the + * quoting yourself and provide the full command line in SysProcAttr.CmdLine, + * leaving Args empty. + */ + (name: string, ...arg: string[]): (Cmd) + } +} + namespace security { interface s256Challenge { /** @@ -5244,8 +5244,8 @@ namespace filesystem { */ open(): io.ReadSeekCloser } - type _subteRxd = bytes.Reader - interface bytesReadSeekCloser extends _subteRxd { + type _subegBIl = bytes.Reader + interface bytesReadSeekCloser extends _subegBIl { } interface bytesReadSeekCloser { /** @@ -5442,111 +5442,6 @@ namespace mails { } } -/** - * Package template is a thin wrapper around the standard html/template - * and text/template packages that implements a convenient registry to - * load and cache templates on the fly concurrently. - * - * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. - * - * Example: - * - * ``` - * registry := template.NewRegistry() - * - * html1, err := registry.LoadFiles( - * // the files set wil be parsed only once and then cached - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "John"}) - * - * html2, err := registry.LoadFiles( - * // reuse the already parsed and cached files set - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "Jane"}) - * ``` - */ -namespace template { - interface newRegistry { - /** - * NewRegistry creates and initializes a new templates registry with - * some defaults (eg. global "raw" template function for unescaped HTML). - * - * Use the Registry.Load* methods to load templates into the registry. - */ - (): (Registry) - } - /** - * Registry defines a templates registry that is safe to be used by multiple goroutines. - * - * Use the Registry.Load* methods to load templates into the registry. - */ - interface Registry { - } - interface Registry { - /** - * AddFuncs registers new global template functions. - * - * The key of each map entry is the function name that will be used in the templates. - * If a function with the map entry name already exists it will be replaced with the new one. - * - * The value of each map entry is a function that must have either a - * single return value, or two return values of which the second has type error. - * - * Example: - * - * ``` - * r.AddFuncs(map[string]any{ - * "toUpper": func(str string) string { - * return strings.ToUppser(str) - * }, - * ... - * }) - * ``` - */ - addFuncs(funcs: _TygojaDict): (Registry) - } - interface Registry { - /** - * LoadFiles caches (if not already) the specified filenames set as a - * single template and returns a ready to use Renderer instance. - * - * There must be at least 1 filename specified. - */ - loadFiles(...filenames: string[]): (Renderer) - } - interface Registry { - /** - * LoadString caches (if not already) the specified inline string as a - * single template and returns a ready to use Renderer instance. 
- */ - loadString(text: string): (Renderer) - } - interface Registry { - /** - * LoadFS caches (if not already) the specified fs and globPatterns - * pair as single template and returns a ready to use Renderer instance. - * - * There must be at least 1 file matching the provided globPattern(s) - * (note that most file names serves as glob patterns matching themselves). - */ - loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) - } - /** - * Renderer defines a single parsed template. - */ - interface Renderer { - } - interface Renderer { - /** - * Render executes the template with the specified data as the dot object - * and returns the result as plain string. - */ - render(data: any): string - } -} - namespace forms { // @ts-ignore import validation = ozzo_validation @@ -5847,13 +5742,6 @@ namespace apis { */ (fsys: fs.FS, indexFallback: boolean): hook.HandlerFunc } - interface findUploadedFiles { - /** - * FindUploadedFiles extracts all form files of "key" from a http request - * and returns a slice with filesystem.File instances (if any). - */ - (r: http.Request, key: string): Array<(filesystem.File | undefined)> - } interface HandleFunc {(e: core.RequestEvent): void } interface BatchActionHandlerFunc {(app: CoreApp, ir: core.InternalRequest, params: _TygojaDict, next: () => void): HandleFunc } interface BatchRequestResult { @@ -5968,8 +5856,8 @@ namespace apis { */ (limitBytes: number): (hook.Handler) } - type _subWJQoF = io.ReadCloser - interface limitedReader extends _subWJQoF { + type _subuABjn = io.ReadCloser + interface limitedReader extends _subuABjn { } interface limitedReader { read(b: string|Array): number @@ -6120,8 +6008,8 @@ namespace apis { */ (config: GzipConfig): hook.HandlerFunc } - type _subpcIaI = http.ResponseWriter&io.Writer - interface gzipResponseWriter extends _subpcIaI { + type _subPqGFq = http.ResponseWriter&io.Writer + interface gzipResponseWriter extends _subPqGFq { } interface gzipResponseWriter { writeHeader(code: number): void @@ -6144,11 +6032,11 @@ namespace apis { interface gzipResponseWriter { unwrap(): http.ResponseWriter } - type _subaqIgM = sync.RWMutex - interface rateLimiter extends _subaqIgM { + type _subnUksC = sync.RWMutex + interface rateLimiter extends _subnUksC { } - type _subPpcNi = sync.Mutex - interface fixedWindow extends _subPpcNi { + type _subWjXlJ = sync.Mutex + interface fixedWindow extends _subWjXlJ { } interface realtimeSubscribeForm { clientId: string @@ -6391,6 +6279,111 @@ namespace apis { } } +/** + * Package template is a thin wrapper around the standard html/template + * and text/template packages that implements a convenient registry to + * load and cache templates on the fly concurrently. + * + * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. + * + * Example: + * + * ``` + * registry := template.NewRegistry() + * + * html1, err := registry.LoadFiles( + * // the files set wil be parsed only once and then cached + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "John"}) + * + * html2, err := registry.LoadFiles( + * // reuse the already parsed and cached files set + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "Jane"}) + * ``` + */ +namespace template { + interface newRegistry { + /** + * NewRegistry creates and initializes a new templates registry with + * some defaults (eg. global "raw" template function for unescaped HTML). + * + * Use the Registry.Load* methods to load templates into the registry. 
+ */ + (): (Registry) + } + /** + * Registry defines a templates registry that is safe to be used by multiple goroutines. + * + * Use the Registry.Load* methods to load templates into the registry. + */ + interface Registry { + } + interface Registry { + /** + * AddFuncs registers new global template functions. + * + * The key of each map entry is the function name that will be used in the templates. + * If a function with the map entry name already exists it will be replaced with the new one. + * + * The value of each map entry is a function that must have either a + * single return value, or two return values of which the second has type error. + * + * Example: + * + * ``` + * r.AddFuncs(map[string]any{ + * "toUpper": func(str string) string { + * return strings.ToUppser(str) + * }, + * ... + * }) + * ``` + */ + addFuncs(funcs: _TygojaDict): (Registry) + } + interface Registry { + /** + * LoadFiles caches (if not already) the specified filenames set as a + * single template and returns a ready to use Renderer instance. + * + * There must be at least 1 filename specified. + */ + loadFiles(...filenames: string[]): (Renderer) + } + interface Registry { + /** + * LoadString caches (if not already) the specified inline string as a + * single template and returns a ready to use Renderer instance. + */ + loadString(text: string): (Renderer) + } + interface Registry { + /** + * LoadFS caches (if not already) the specified fs and globPatterns + * pair as single template and returns a ready to use Renderer instance. + * + * There must be at least 1 file matching the provided globPattern(s) + * (note that most file names serves as glob patterns matching themselves). + */ + loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) + } + /** + * Renderer defines a single parsed template. + */ + interface Renderer { + } + interface Renderer { + /** + * Render executes the template with the specified data as the dot object + * and returns the result as plain string. + */ + render(data: any): string + } +} + namespace pocketbase { /** * PocketBase defines a PocketBase app launcher. @@ -6398,8 +6391,8 @@ namespace pocketbase { * It implements [CoreApp] via embedding and all of the app interface methods * could be accessed directly through the instance (eg. PocketBase.DataDir()). */ - type _subltdOS = CoreApp - interface PocketBase extends _subltdOS { + type _subZEvMV = CoreApp + interface PocketBase extends _subZEvMV { /** * RootCmd is the main console command */ @@ -6631,158 +6624,6 @@ namespace sync { } } -/** - * Package syscall contains an interface to the low-level operating system - * primitives. The details vary depending on the underlying system, and - * by default, godoc will display the syscall documentation for the current - * system. If you want godoc to display syscall documentation for another - * system, set $GOOS and $GOARCH to the desired system. For example, if - * you want to view documentation for freebsd/arm on linux/amd64, set $GOOS - * to freebsd and $GOARCH to arm. - * The primary use of syscall is inside other packages that provide a more - * portable interface to the system, such as "os", "time" and "net". Use - * those packages rather than this one if you can. - * For details of the functions and data types in this package consult - * the manuals for the appropriate operating system. - * These calls return err == nil to indicate success; otherwise - * err is an operating system error describing the failure. - * On most systems, that error has type [Errno]. 
- * - * NOTE: Most of the functions, types, and constants defined in - * this package are also available in the [golang.org/x/sys] package. - * That package has more system call support than this one, - * and most new code should prefer that package where possible. - * See https://golang.org/s/go1.4-syscall for more information. - */ -namespace syscall { - interface SysProcAttr { - chroot: string // Chroot. - credential?: Credential // Credential. - /** - * Ptrace tells the child to call ptrace(PTRACE_TRACEME). - * Call runtime.LockOSThread before starting a process with this set, - * and don't call UnlockOSThread until done with PtraceSyscall calls. - */ - ptrace: boolean - setsid: boolean // Create session. - /** - * Setpgid sets the process group ID of the child to Pgid, - * or, if Pgid == 0, to the new child's process ID. - */ - setpgid: boolean - /** - * Setctty sets the controlling terminal of the child to - * file descriptor Ctty. Ctty must be a descriptor number - * in the child process: an index into ProcAttr.Files. - * This is only meaningful if Setsid is true. - */ - setctty: boolean - noctty: boolean // Detach fd 0 from controlling terminal. - ctty: number // Controlling TTY fd. - /** - * Foreground places the child process group in the foreground. - * This implies Setpgid. The Ctty field must be set to - * the descriptor of the controlling TTY. - * Unlike Setctty, in this case Ctty must be a descriptor - * number in the parent process. - */ - foreground: boolean - pgid: number // Child's process group ID if Setpgid. - /** - * Pdeathsig, if non-zero, is a signal that the kernel will send to - * the child process when the creating thread dies. Note that the signal - * is sent on thread termination, which may happen before process termination. - * There are more details at https://go.dev/issue/27505. - */ - pdeathsig: Signal - cloneflags: number // Flags for clone calls. - unshareflags: number // Flags for unshare calls. - uidMappings: Array // User ID mappings for user namespaces. - gidMappings: Array // Group ID mappings for user namespaces. - /** - * GidMappingsEnableSetgroups enabling setgroups syscall. - * If false, then setgroups syscall will be disabled for the child process. - * This parameter is no-op if GidMappings == nil. Otherwise for unprivileged - * users this should be set to false for mappings work. - */ - gidMappingsEnableSetgroups: boolean - ambientCaps: Array // Ambient capabilities. - useCgroupFD: boolean // Whether to make use of the CgroupFD field. - cgroupFD: number // File descriptor of a cgroup to put the new process into. - /** - * PidFD, if not nil, is used to store the pidfd of a child, if the - * functionality is supported by the kernel, or -1. Note *PidFD is - * changed only if the process starts successfully. - */ - pidFD?: number - } - // @ts-ignore - import errorspkg = errors - /** - * A RawConn is a raw network connection. - */ - interface RawConn { - [key:string]: any; - /** - * Control invokes f on the underlying connection's file - * descriptor or handle. - * The file descriptor fd is guaranteed to remain valid while - * f executes but not after f returns. - */ - control(f: (fd: number) => void): void - /** - * Read invokes f on the underlying connection's file - * descriptor or handle; f is expected to try to read from the - * file descriptor. - * If f returns true, Read returns. Otherwise Read blocks - * waiting for the connection to be ready for reading and - * tries again repeatedly. 
- * The file descriptor is guaranteed to remain valid while f - * executes but not after f returns. - */ - read(f: (fd: number) => boolean): void - /** - * Write is like Read but for writing. - */ - write(f: (fd: number) => boolean): void - } - // @ts-ignore - import runtimesyscall = syscall - /** - * An Errno is an unsigned number describing an error condition. - * It implements the error interface. The zero Errno is by convention - * a non-error, so code to convert from Errno to error should use: - * - * ``` - * err = nil - * if errno != 0 { - * err = errno - * } - * ``` - * - * Errno values can be tested against error values using [errors.Is]. - * For example: - * - * ``` - * _, _, err := syscall.Syscall(...) - * if errors.Is(err, fs.ErrNotExist) ... - * ``` - */ - interface Errno extends Number{} - interface Errno { - error(): string - } - interface Errno { - is(target: Error): boolean - } - interface Errno { - temporary(): boolean - } - interface Errno { - timeout(): boolean - } -} - /** * Package io provides basic interfaces to I/O primitives. * Its primary job is to wrap existing implementations of such primitives, @@ -6946,6 +6787,158 @@ namespace bytes { } } +/** + * Package syscall contains an interface to the low-level operating system + * primitives. The details vary depending on the underlying system, and + * by default, godoc will display the syscall documentation for the current + * system. If you want godoc to display syscall documentation for another + * system, set $GOOS and $GOARCH to the desired system. For example, if + * you want to view documentation for freebsd/arm on linux/amd64, set $GOOS + * to freebsd and $GOARCH to arm. + * The primary use of syscall is inside other packages that provide a more + * portable interface to the system, such as "os", "time" and "net". Use + * those packages rather than this one if you can. + * For details of the functions and data types in this package consult + * the manuals for the appropriate operating system. + * These calls return err == nil to indicate success; otherwise + * err is an operating system error describing the failure. + * On most systems, that error has type [Errno]. + * + * NOTE: Most of the functions, types, and constants defined in + * this package are also available in the [golang.org/x/sys] package. + * That package has more system call support than this one, + * and most new code should prefer that package where possible. + * See https://golang.org/s/go1.4-syscall for more information. + */ +namespace syscall { + interface SysProcAttr { + chroot: string // Chroot. + credential?: Credential // Credential. + /** + * Ptrace tells the child to call ptrace(PTRACE_TRACEME). + * Call runtime.LockOSThread before starting a process with this set, + * and don't call UnlockOSThread until done with PtraceSyscall calls. + */ + ptrace: boolean + setsid: boolean // Create session. + /** + * Setpgid sets the process group ID of the child to Pgid, + * or, if Pgid == 0, to the new child's process ID. + */ + setpgid: boolean + /** + * Setctty sets the controlling terminal of the child to + * file descriptor Ctty. Ctty must be a descriptor number + * in the child process: an index into ProcAttr.Files. + * This is only meaningful if Setsid is true. + */ + setctty: boolean + noctty: boolean // Detach fd 0 from controlling terminal. + ctty: number // Controlling TTY fd. + /** + * Foreground places the child process group in the foreground. + * This implies Setpgid. 
The Ctty field must be set to + * the descriptor of the controlling TTY. + * Unlike Setctty, in this case Ctty must be a descriptor + * number in the parent process. + */ + foreground: boolean + pgid: number // Child's process group ID if Setpgid. + /** + * Pdeathsig, if non-zero, is a signal that the kernel will send to + * the child process when the creating thread dies. Note that the signal + * is sent on thread termination, which may happen before process termination. + * There are more details at https://go.dev/issue/27505. + */ + pdeathsig: Signal + cloneflags: number // Flags for clone calls. + unshareflags: number // Flags for unshare calls. + uidMappings: Array // User ID mappings for user namespaces. + gidMappings: Array // Group ID mappings for user namespaces. + /** + * GidMappingsEnableSetgroups enabling setgroups syscall. + * If false, then setgroups syscall will be disabled for the child process. + * This parameter is no-op if GidMappings == nil. Otherwise for unprivileged + * users this should be set to false for mappings work. + */ + gidMappingsEnableSetgroups: boolean + ambientCaps: Array // Ambient capabilities. + useCgroupFD: boolean // Whether to make use of the CgroupFD field. + cgroupFD: number // File descriptor of a cgroup to put the new process into. + /** + * PidFD, if not nil, is used to store the pidfd of a child, if the + * functionality is supported by the kernel, or -1. Note *PidFD is + * changed only if the process starts successfully. + */ + pidFD?: number + } + // @ts-ignore + import errorspkg = errors + /** + * A RawConn is a raw network connection. + */ + interface RawConn { + [key:string]: any; + /** + * Control invokes f on the underlying connection's file + * descriptor or handle. + * The file descriptor fd is guaranteed to remain valid while + * f executes but not after f returns. + */ + control(f: (fd: number) => void): void + /** + * Read invokes f on the underlying connection's file + * descriptor or handle; f is expected to try to read from the + * file descriptor. + * If f returns true, Read returns. Otherwise Read blocks + * waiting for the connection to be ready for reading and + * tries again repeatedly. + * The file descriptor is guaranteed to remain valid while f + * executes but not after f returns. + */ + read(f: (fd: number) => boolean): void + /** + * Write is like Read but for writing. + */ + write(f: (fd: number) => boolean): void + } + // @ts-ignore + import runtimesyscall = syscall + /** + * An Errno is an unsigned number describing an error condition. + * It implements the error interface. The zero Errno is by convention + * a non-error, so code to convert from Errno to error should use: + * + * ``` + * err = nil + * if errno != 0 { + * err = errno + * } + * ``` + * + * Errno values can be tested against error values using [errors.Is]. + * For example: + * + * ``` + * _, _, err := syscall.Syscall(...) + * if errors.Is(err, fs.ErrNotExist) ... + * ``` + */ + interface Errno extends Number{} + interface Errno { + error(): string + } + interface Errno { + is(target: Error): boolean + } + interface Errno { + temporary(): boolean + } + interface Errno { + timeout(): boolean + } +} + /** * Package time provides functionality for measuring and displaying time. * @@ -7527,169 +7520,6 @@ namespace time { } } -/** - * Package context defines the Context type, which carries deadlines, - * cancellation signals, and other request-scoped values across API boundaries - * and between processes. 
- * - * Incoming requests to a server should create a [Context], and outgoing - * calls to servers should accept a Context. The chain of function - * calls between them must propagate the Context, optionally replacing - * it with a derived Context created using [WithCancel], [WithDeadline], - * [WithTimeout], or [WithValue]. When a Context is canceled, all - * Contexts derived from it are also canceled. - * - * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a - * Context (the parent) and return a derived Context (the child) and a - * [CancelFunc]. Calling the CancelFunc cancels the child and its - * children, removes the parent's reference to the child, and stops - * any associated timers. Failing to call the CancelFunc leaks the - * child and its children until the parent is canceled or the timer - * fires. The go vet tool checks that CancelFuncs are used on all - * control-flow paths. - * - * The [WithCancelCause] function returns a [CancelCauseFunc], which - * takes an error and records it as the cancellation cause. Calling - * [Cause] on the canceled context or any of its children retrieves - * the cause. If no cause is specified, Cause(ctx) returns the same - * value as ctx.Err(). - * - * Programs that use Contexts should follow these rules to keep interfaces - * consistent across packages and enable static analysis tools to check context - * propagation: - * - * Do not store Contexts inside a struct type; instead, pass a Context - * explicitly to each function that needs it. The Context should be the first - * parameter, typically named ctx: - * - * ``` - * func DoSomething(ctx context.Context, arg Arg) error { - * // ... use ctx ... - * } - * ``` - * - * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] - * if you are unsure about which Context to use. - * - * Use context Values only for request-scoped data that transits processes and - * APIs, not for passing optional parameters to functions. - * - * The same Context may be passed to functions running in different goroutines; - * Contexts are safe for simultaneous use by multiple goroutines. - * - * See https://blog.golang.org/context for example code for a server that uses - * Contexts. - */ -namespace context { - /** - * A Context carries a deadline, a cancellation signal, and other values across - * API boundaries. - * - * Context's methods may be called by multiple goroutines simultaneously. - */ - interface Context { - [key:string]: any; - /** - * Deadline returns the time when work done on behalf of this context - * should be canceled. Deadline returns ok==false when no deadline is - * set. Successive calls to Deadline return the same results. - */ - deadline(): [time.Time, boolean] - /** - * Done returns a channel that's closed when work done on behalf of this - * context should be canceled. Done may return nil if this context can - * never be canceled. Successive calls to Done return the same value. - * The close of the Done channel may happen asynchronously, - * after the cancel function returns. - * - * WithCancel arranges for Done to be closed when cancel is called; - * WithDeadline arranges for Done to be closed when the deadline - * expires; WithTimeout arranges for Done to be closed when the timeout - * elapses. - * - * Done is provided for use in select statements: - * - * // Stream generates values with DoSomething and sends them to out - * // until DoSomething returns an error or ctx.Done is closed. 
- * func Stream(ctx context.Context, out chan<- Value) error { - * for { - * v, err := DoSomething(ctx) - * if err != nil { - * return err - * } - * select { - * case <-ctx.Done(): - * return ctx.Err() - * case out <- v: - * } - * } - * } - * - * See https://blog.golang.org/pipelines for more examples of how to use - * a Done channel for cancellation. - */ - done(): undefined - /** - * If Done is not yet closed, Err returns nil. - * If Done is closed, Err returns a non-nil error explaining why: - * Canceled if the context was canceled - * or DeadlineExceeded if the context's deadline passed. - * After Err returns a non-nil error, successive calls to Err return the same error. - */ - err(): void - /** - * Value returns the value associated with this context for key, or nil - * if no value is associated with key. Successive calls to Value with - * the same key returns the same result. - * - * Use context values only for request-scoped data that transits - * processes and API boundaries, not for passing optional parameters to - * functions. - * - * A key identifies a specific value in a Context. Functions that wish - * to store values in Context typically allocate a key in a global - * variable then use that key as the argument to context.WithValue and - * Context.Value. A key can be any type that supports equality; - * packages should define keys as an unexported type to avoid - * collisions. - * - * Packages that define a Context key should provide type-safe accessors - * for the values stored using that key: - * - * ``` - * // Package user defines a User type that's stored in Contexts. - * package user - * - * import "context" - * - * // User is the type of value stored in the Contexts. - * type User struct {...} - * - * // key is an unexported type for keys defined in this package. - * // This prevents collisions with keys defined in other packages. - * type key int - * - * // userKey is the key for user.User values in Contexts. It is - * // unexported; clients use user.NewContext and user.FromContext - * // instead of using this key directly. - * var userKey key - * - * // NewContext returns a new Context that carries value u. - * func NewContext(ctx context.Context, u *User) context.Context { - * return context.WithValue(ctx, userKey, u) - * } - * - * // FromContext returns the User value stored in ctx, if any. - * func FromContext(ctx context.Context) (*User, bool) { - * u, ok := ctx.Value(userKey).(*User) - * return u, ok - * } - * ``` - */ - value(key: any): any - } -} - /** * Package fs defines basic interfaces to a file system. * A file system can be provided by the host operating system @@ -7890,664 +7720,6 @@ namespace fs { interface WalkDirFunc {(path: string, d: DirEntry, err: Error): void } } -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { - /** - * TxOptions holds the transaction options to be used in [DB.BeginTx]. - */ - interface TxOptions { - /** - * Isolation is the transaction isolation level. - * If zero, the driver or database's default level is used. 
- */ - isolation: IsolationLevel - readOnly: boolean - } - /** - * DB is a database handle representing a pool of zero or more - * underlying connections. It's safe for concurrent use by multiple - * goroutines. - * - * The sql package creates and frees connections automatically; it - * also maintains a free pool of idle connections. If the database has - * a concept of per-connection state, such state can be reliably observed - * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the - * returned [Tx] is bound to a single connection. Once [Tx.Commit] or - * [Tx.Rollback] is called on the transaction, that transaction's - * connection is returned to [DB]'s idle connection pool. The pool size - * can be controlled with [DB.SetMaxIdleConns]. - */ - interface DB { - } - interface DB { - /** - * PingContext verifies a connection to the database is still alive, - * establishing a connection if necessary. - */ - pingContext(ctx: context.Context): void - } - interface DB { - /** - * Ping verifies a connection to the database is still alive, - * establishing a connection if necessary. - * - * Ping uses [context.Background] internally; to specify the context, use - * [DB.PingContext]. - */ - ping(): void - } - interface DB { - /** - * Close closes the database and prevents new queries from starting. - * Close then waits for all queries that have started processing on the server - * to finish. - * - * It is rare to Close a [DB], as the [DB] handle is meant to be - * long-lived and shared between many goroutines. - */ - close(): void - } - interface DB { - /** - * SetMaxIdleConns sets the maximum number of connections in the idle - * connection pool. - * - * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, - * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit. - * - * If n <= 0, no idle connections are retained. - * - * The default max idle connections is currently 2. This may change in - * a future release. - */ - setMaxIdleConns(n: number): void - } - interface DB { - /** - * SetMaxOpenConns sets the maximum number of open connections to the database. - * - * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than - * MaxIdleConns, then MaxIdleConns will be reduced to match the new - * MaxOpenConns limit. - * - * If n <= 0, then there is no limit on the number of open connections. - * The default is 0 (unlimited). - */ - setMaxOpenConns(n: number): void - } - interface DB { - /** - * SetConnMaxLifetime sets the maximum amount of time a connection may be reused. - * - * Expired connections may be closed lazily before reuse. - * - * If d <= 0, connections are not closed due to a connection's age. - */ - setConnMaxLifetime(d: time.Duration): void - } - interface DB { - /** - * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle. - * - * Expired connections may be closed lazily before reuse. - * - * If d <= 0, connections are not closed due to a connection's idle time. - */ - setConnMaxIdleTime(d: time.Duration): void - } - interface DB { - /** - * Stats returns database statistics. - */ - stats(): DBStats - } - interface DB { - /** - * PrepareContext creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. 
- * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface DB { - /** - * Prepare creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * Prepare uses [context.Background] internally; to specify the context, use - * [DB.PrepareContext]. - */ - prepare(query: string): (Stmt) - } - interface DB { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface DB { - /** - * Exec executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - * - * Exec uses [context.Background] internally; to specify the context, use - * [DB.ExecContext]. - */ - exec(query: string, ...args: any[]): Result - } - interface DB { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface DB { - /** - * Query executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - * - * Query uses [context.Background] internally; to specify the context, use - * [DB.QueryContext]. - */ - query(query: string, ...args: any[]): (Rows) - } - interface DB { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface DB { - /** - * QueryRow executes a query that is expected to return at most one row. - * QueryRow always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, [*Row.Scan] scans the first selected row and discards - * the rest. - * - * QueryRow uses [context.Background] internally; to specify the context, use - * [DB.QueryRowContext]. - */ - queryRow(query: string, ...args: any[]): (Row) - } - interface DB { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. - */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) - } - interface DB { - /** - * Begin starts a transaction. The default isolation level is dependent on - * the driver. - * - * Begin uses [context.Background] internally; to specify the context, use - * [DB.BeginTx]. 
- */ - begin(): (Tx) - } - interface DB { - /** - * Driver returns the database's underlying driver. - */ - driver(): any - } - interface DB { - /** - * Conn returns a single connection by either opening a new connection - * or returning an existing connection from the connection pool. Conn will - * block until either a connection is returned or ctx is canceled. - * Queries run on the same Conn will be run in the same database session. - * - * Every Conn must be returned to the database pool after use by - * calling [Conn.Close]. - */ - conn(ctx: context.Context): (Conn) - } - /** - * Tx is an in-progress database transaction. - * - * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback]. - * - * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the - * transaction fail with [ErrTxDone]. - * - * The statements prepared for a transaction by calling - * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed - * by the call to [Tx.Commit] or [Tx.Rollback]. - */ - interface Tx { - } - interface Tx { - /** - * Commit commits the transaction. - */ - commit(): void - } - interface Tx { - /** - * Rollback aborts the transaction. - */ - rollback(): void - } - interface Tx { - /** - * PrepareContext creates a prepared statement for use within a transaction. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * To use an existing prepared statement on this transaction, see [Tx.Stmt]. - * - * The provided context will be used for the preparation of the context, not - * for the execution of the returned statement. The returned statement - * will run in the transaction context. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface Tx { - /** - * Prepare creates a prepared statement for use within a transaction. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * To use an existing prepared statement on this transaction, see [Tx.Stmt]. - * - * Prepare uses [context.Background] internally; to specify the context, use - * [Tx.PrepareContext]. - */ - prepare(query: string): (Stmt) - } - interface Tx { - /** - * StmtContext returns a transaction-specific prepared statement from - * an existing statement. - * - * Example: - * - * ``` - * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") - * ... - * tx, err := db.Begin() - * ... - * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203) - * ``` - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - */ - stmtContext(ctx: context.Context, stmt: Stmt): (Stmt) - } - interface Tx { - /** - * Stmt returns a transaction-specific prepared statement from - * an existing statement. - * - * Example: - * - * ``` - * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") - * ... - * tx, err := db.Begin() - * ... - * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203) - * ``` - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * Stmt uses [context.Background] internally; to specify the context, use - * [Tx.StmtContext]. 
- */ - stmt(stmt: Stmt): (Stmt) - } - interface Tx { - /** - * ExecContext executes a query that doesn't return rows. - * For example: an INSERT and UPDATE. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Tx { - /** - * Exec executes a query that doesn't return rows. - * For example: an INSERT and UPDATE. - * - * Exec uses [context.Background] internally; to specify the context, use - * [Tx.ExecContext]. - */ - exec(query: string, ...args: any[]): Result - } - interface Tx { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Tx { - /** - * Query executes a query that returns rows, typically a SELECT. - * - * Query uses [context.Background] internally; to specify the context, use - * [Tx.QueryContext]. - */ - query(query: string, ...args: any[]): (Rows) - } - interface Tx { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Tx { - /** - * QueryRow executes a query that is expected to return at most one row. - * QueryRow always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - * - * QueryRow uses [context.Background] internally; to specify the context, use - * [Tx.QueryRowContext]. - */ - queryRow(query: string, ...args: any[]): (Row) - } - /** - * Stmt is a prepared statement. - * A Stmt is safe for concurrent use by multiple goroutines. - * - * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single - * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will - * become unusable and all operations will return an error. - * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the - * [DB]. When the Stmt needs to execute on a new underlying connection, it will - * prepare itself on the new connection automatically. - */ - interface Stmt { - } - interface Stmt { - /** - * ExecContext executes a prepared statement with the given arguments and - * returns a [Result] summarizing the effect of the statement. - */ - execContext(ctx: context.Context, ...args: any[]): Result - } - interface Stmt { - /** - * Exec executes a prepared statement with the given arguments and - * returns a [Result] summarizing the effect of the statement. - * - * Exec uses [context.Background] internally; to specify the context, use - * [Stmt.ExecContext]. - */ - exec(...args: any[]): Result - } - interface Stmt { - /** - * QueryContext executes a prepared query statement with the given arguments - * and returns the query results as a [*Rows]. - */ - queryContext(ctx: context.Context, ...args: any[]): (Rows) - } - interface Stmt { - /** - * Query executes a prepared query statement with the given arguments - * and returns the query results as a *Rows. - * - * Query uses [context.Background] internally; to specify the context, use - * [Stmt.QueryContext]. 
- */ - query(...args: any[]): (Rows) - } - interface Stmt { - /** - * QueryRowContext executes a prepared query statement with the given arguments. - * If an error occurs during the execution of the statement, that error will - * be returned by a call to Scan on the returned [*Row], which is always non-nil. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, ...args: any[]): (Row) - } - interface Stmt { - /** - * QueryRow executes a prepared query statement with the given arguments. - * If an error occurs during the execution of the statement, that error will - * be returned by a call to Scan on the returned [*Row], which is always non-nil. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - * - * Example usage: - * - * ``` - * var name string - * err := nameByUseridStmt.QueryRow(id).Scan(&name) - * ``` - * - * QueryRow uses [context.Background] internally; to specify the context, use - * [Stmt.QueryRowContext]. - */ - queryRow(...args: any[]): (Row) - } - interface Stmt { - /** - * Close closes the statement. - */ - close(): void - } - /** - * Rows is the result of a query. Its cursor starts before the first row - * of the result set. Use [Rows.Next] to advance from row to row. - */ - interface Rows { - } - interface Rows { - /** - * Next prepares the next result row for reading with the [Rows.Scan] method. It - * returns true on success, or false if there is no next result row or an error - * happened while preparing it. [Rows.Err] should be consulted to distinguish between - * the two cases. - * - * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next]. - */ - next(): boolean - } - interface Rows { - /** - * NextResultSet prepares the next result set for reading. It reports whether - * there is further result sets, or false if there is no further result set - * or if there is an error advancing to it. The [Rows.Err] method should be consulted - * to distinguish between the two cases. - * - * After calling NextResultSet, the [Rows.Next] method should always be called before - * scanning. If there are further result sets they may not have rows in the result - * set. - */ - nextResultSet(): boolean - } - interface Rows { - /** - * Err returns the error, if any, that was encountered during iteration. - * Err may be called after an explicit or implicit [Rows.Close]. - */ - err(): void - } - interface Rows { - /** - * Columns returns the column names. - * Columns returns an error if the rows are closed. - */ - columns(): Array - } - interface Rows { - /** - * ColumnTypes returns column information such as column type, length, - * and nullable. Some information may not be available from some drivers. - */ - columnTypes(): Array<(ColumnType | undefined)> - } - interface Rows { - /** - * Scan copies the columns in the current row into the values pointed - * at by dest. The number of values in dest must be the same as the - * number of columns in [Rows]. 
- * - * Scan converts columns read from the database into the following - * common Go types and special types provided by the sql package: - * - * ``` - * *string - * *[]byte - * *int, *int8, *int16, *int32, *int64 - * *uint, *uint8, *uint16, *uint32, *uint64 - * *bool - * *float32, *float64 - * *interface{} - * *RawBytes - * *Rows (cursor value) - * any type implementing Scanner (see Scanner docs) - * ``` - * - * In the most simple case, if the type of the value from the source - * column is an integer, bool or string type T and dest is of type *T, - * Scan simply assigns the value through the pointer. - * - * Scan also converts between string and numeric types, as long as no - * information would be lost. While Scan stringifies all numbers - * scanned from numeric database columns into *string, scans into - * numeric types are checked for overflow. For example, a float64 with - * value 300 or a string with value "300" can scan into a uint16, but - * not into a uint8, though float64(255) or "255" can scan into a - * uint8. One exception is that scans of some float64 numbers to - * strings may lose information when stringifying. In general, scan - * floating point columns into *float64. - * - * If a dest argument has type *[]byte, Scan saves in that argument a - * copy of the corresponding data. The copy is owned by the caller and - * can be modified and held indefinitely. The copy can be avoided by - * using an argument of type [*RawBytes] instead; see the documentation - * for [RawBytes] for restrictions on its use. - * - * If an argument has type *interface{}, Scan copies the value - * provided by the underlying driver without conversion. When scanning - * from a source value of type []byte to *interface{}, a copy of the - * slice is made and the caller owns the result. - * - * Source values of type [time.Time] may be scanned into values of type - * *time.Time, *interface{}, *string, or *[]byte. When converting to - * the latter two, [time.RFC3339Nano] is used. - * - * Source values of type bool may be scanned into types *bool, - * *interface{}, *string, *[]byte, or [*RawBytes]. - * - * For scanning into *bool, the source may be true, false, 1, 0, or - * string inputs parseable by [strconv.ParseBool]. - * - * Scan can also convert a cursor returned from a query, such as - * "select cursor(select * from my_table) from dual", into a - * [*Rows] value that can itself be scanned from. The parent - * select query will close any cursor [*Rows] if the parent [*Rows] is closed. - * - * If any of the first arguments implementing [Scanner] returns an error, - * that error will be wrapped in the returned error. - */ - scan(...dest: any[]): void - } - interface Rows { - /** - * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called - * and returns false and there are no further result sets, - * the [Rows] are closed automatically and it will suffice to check the - * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err]. - */ - close(): void - } - /** - * A Result summarizes an executed SQL command. - */ - interface Result { - [key:string]: any; - /** - * LastInsertId returns the integer generated by the database - * in response to a command. Typically this will be from an - * "auto increment" column when inserting a new row. Not all - * databases support this feature, and the syntax of such - * statements varies. - */ - lastInsertId(): number - /** - * RowsAffected returns the number of rows affected by an - * update, insert, or delete. 
Not every database or database - * driver may support this. - */ - rowsAffected(): number - } -} - -/** - * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer - * object, creating another object (Reader or Writer) that also implements - * the interface but provides buffering and some help for textual I/O. - */ -namespace bufio { - /** - * ReadWriter stores pointers to a [Reader] and a [Writer]. - * It implements [io.ReadWriter]. - */ - type _subxjvIW = Reader&Writer - interface ReadWriter extends _subxjvIW { - } -} - /** * Package syntax parses regular expressions into parse trees and compiles * parse trees into programs. Most clients of regular expressions will use the @@ -8711,376 +7883,165 @@ namespace syntax { } /** - * Package exec runs external commands. It wraps os.StartProcess to make it - * easier to remap stdin and stdout, connect I/O with pipes, and do other - * adjustments. + * Package context defines the Context type, which carries deadlines, + * cancellation signals, and other request-scoped values across API boundaries + * and between processes. * - * Unlike the "system" library call from C and other languages, the - * os/exec package intentionally does not invoke the system shell and - * does not expand any glob patterns or handle other expansions, - * pipelines, or redirections typically done by shells. The package - * behaves more like C's "exec" family of functions. To expand glob - * patterns, either call the shell directly, taking care to escape any - * dangerous input, or use the [path/filepath] package's Glob function. - * To expand environment variables, use package os's ExpandEnv. + * Incoming requests to a server should create a [Context], and outgoing + * calls to servers should accept a Context. The chain of function + * calls between them must propagate the Context, optionally replacing + * it with a derived Context created using [WithCancel], [WithDeadline], + * [WithTimeout], or [WithValue]. When a Context is canceled, all + * Contexts derived from it are also canceled. * - * Note that the examples in this package assume a Unix system. - * They may not run on Windows, and they do not run in the Go Playground - * used by golang.org and godoc.org. + * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a + * Context (the parent) and return a derived Context (the child) and a + * [CancelFunc]. Calling the CancelFunc cancels the child and its + * children, removes the parent's reference to the child, and stops + * any associated timers. Failing to call the CancelFunc leaks the + * child and its children until the parent is canceled or the timer + * fires. The go vet tool checks that CancelFuncs are used on all + * control-flow paths. * - * # Executables in the current directory + * The [WithCancelCause] function returns a [CancelCauseFunc], which + * takes an error and records it as the cancellation cause. Calling + * [Cause] on the canceled context or any of its children retrieves + * the cause. If no cause is specified, Cause(ctx) returns the same + * value as ctx.Err(). * - * The functions [Command] and [LookPath] look for a program - * in the directories listed in the current path, following the - * conventions of the host operating system. - * Operating systems have for decades included the current - * directory in this search, sometimes implicitly and sometimes - * configured explicitly that way by default. 
- * Modern practice is that including the current directory - * is usually unexpected and often leads to security problems. + * Programs that use Contexts should follow these rules to keep interfaces + * consistent across packages and enable static analysis tools to check context + * propagation: * - * To avoid those security problems, as of Go 1.19, this package will not resolve a program - * using an implicit or explicit path entry relative to the current directory. - * That is, if you run [LookPath]("go"), it will not successfully return - * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. - * Instead, if the usual path algorithms would result in that answer, - * these functions return an error err satisfying [errors.Is](err, [ErrDot]). - * - * For example, consider these two program snippets: + * Do not store Contexts inside a struct type; instead, pass a Context + * explicitly to each function that needs it. The Context should be the first + * parameter, typically named ctx: * * ``` - * path, err := exec.LookPath("prog") - * if err != nil { - * log.Fatal(err) - * } - * use(path) - * ``` - * - * and - * - * ``` - * cmd := exec.Command("prog") - * if err := cmd.Run(); err != nil { - * log.Fatal(err) + * func DoSomething(ctx context.Context, arg Arg) error { + * // ... use ctx ... * } * ``` * - * These will not find and run ./prog or .\prog.exe, - * no matter how the current path is configured. + * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] + * if you are unsure about which Context to use. * - * Code that always wants to run a program from the current directory - * can be rewritten to say "./prog" instead of "prog". + * Use context Values only for request-scoped data that transits processes and + * APIs, not for passing optional parameters to functions. * - * Code that insists on including results from relative path entries - * can instead override the error using an errors.Is check: + * The same Context may be passed to functions running in different goroutines; + * Contexts are safe for simultaneous use by multiple goroutines. * - * ``` - * path, err := exec.LookPath("prog") - * if errors.Is(err, exec.ErrDot) { - * err = nil - * } - * if err != nil { - * log.Fatal(err) - * } - * use(path) - * ``` - * - * and - * - * ``` - * cmd := exec.Command("prog") - * if errors.Is(cmd.Err, exec.ErrDot) { - * cmd.Err = nil - * } - * if err := cmd.Run(); err != nil { - * log.Fatal(err) - * } - * ``` - * - * Setting the environment variable GODEBUG=execerrdot=0 - * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19 - * behavior for programs that are unable to apply more targeted fixes. - * A future version of Go may remove support for this variable. - * - * Before adding such overrides, make sure you understand the - * security implications of doing so. - * See https://go.dev/blog/path-security for more information. + * See https://blog.golang.org/context for example code for a server that uses + * Contexts. */ -namespace exec { +namespace context { /** - * Cmd represents an external command being prepared or run. + * A Context carries a deadline, a cancellation signal, and other values across + * API boundaries. * - * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] - * methods. + * Context's methods may be called by multiple goroutines simultaneously. */ - interface Cmd { + interface Context { + [key:string]: any; /** - * Path is the path of the command to run. 
- * - * This is the only field that must be set to a non-zero - * value. If Path is relative, it is evaluated relative - * to Dir. + * Deadline returns the time when work done on behalf of this context + * should be canceled. Deadline returns ok==false when no deadline is + * set. Successive calls to Deadline return the same results. */ - path: string + deadline(): [time.Time, boolean] /** - * Args holds command line arguments, including the command as Args[0]. - * If the Args field is empty or nil, Run uses {Path}. + * Done returns a channel that's closed when work done on behalf of this + * context should be canceled. Done may return nil if this context can + * never be canceled. Successive calls to Done return the same value. + * The close of the Done channel may happen asynchronously, + * after the cancel function returns. * - * In typical use, both Path and Args are set by calling Command. + * WithCancel arranges for Done to be closed when cancel is called; + * WithDeadline arranges for Done to be closed when the deadline + * expires; WithTimeout arranges for Done to be closed when the timeout + * elapses. + * + * Done is provided for use in select statements: + * + * // Stream generates values with DoSomething and sends them to out + * // until DoSomething returns an error or ctx.Done is closed. + * func Stream(ctx context.Context, out chan<- Value) error { + * for { + * v, err := DoSomething(ctx) + * if err != nil { + * return err + * } + * select { + * case <-ctx.Done(): + * return ctx.Err() + * case out <- v: + * } + * } + * } + * + * See https://blog.golang.org/pipelines for more examples of how to use + * a Done channel for cancellation. */ - args: Array + done(): undefined /** - * Env specifies the environment of the process. - * Each entry is of the form "key=value". - * If Env is nil, the new process uses the current process's - * environment. - * If Env contains duplicate environment keys, only the last - * value in the slice for each duplicate key is used. - * As a special case on Windows, SYSTEMROOT is always added if - * missing and not explicitly set to the empty string. + * If Done is not yet closed, Err returns nil. + * If Done is closed, Err returns a non-nil error explaining why: + * Canceled if the context was canceled + * or DeadlineExceeded if the context's deadline passed. + * After Err returns a non-nil error, successive calls to Err return the same error. */ - env: Array + err(): void /** - * Dir specifies the working directory of the command. - * If Dir is the empty string, Run runs the command in the - * calling process's current directory. + * Value returns the value associated with this context for key, or nil + * if no value is associated with key. Successive calls to Value with + * the same key returns the same result. + * + * Use context values only for request-scoped data that transits + * processes and API boundaries, not for passing optional parameters to + * functions. + * + * A key identifies a specific value in a Context. Functions that wish + * to store values in Context typically allocate a key in a global + * variable then use that key as the argument to context.WithValue and + * Context.Value. A key can be any type that supports equality; + * packages should define keys as an unexported type to avoid + * collisions. + * + * Packages that define a Context key should provide type-safe accessors + * for the values stored using that key: + * + * ``` + * // Package user defines a User type that's stored in Contexts. 
+ * package user + * + * import "context" + * + * // User is the type of value stored in the Contexts. + * type User struct {...} + * + * // key is an unexported type for keys defined in this package. + * // This prevents collisions with keys defined in other packages. + * type key int + * + * // userKey is the key for user.User values in Contexts. It is + * // unexported; clients use user.NewContext and user.FromContext + * // instead of using this key directly. + * var userKey key + * + * // NewContext returns a new Context that carries value u. + * func NewContext(ctx context.Context, u *User) context.Context { + * return context.WithValue(ctx, userKey, u) + * } + * + * // FromContext returns the User value stored in ctx, if any. + * func FromContext(ctx context.Context) (*User, bool) { + * u, ok := ctx.Value(userKey).(*User) + * return u, ok + * } + * ``` */ - dir: string - /** - * Stdin specifies the process's standard input. - * - * If Stdin is nil, the process reads from the null device (os.DevNull). - * - * If Stdin is an *os.File, the process's standard input is connected - * directly to that file. - * - * Otherwise, during the execution of the command a separate - * goroutine reads from Stdin and delivers that data to the command - * over a pipe. In this case, Wait does not complete until the goroutine - * stops copying, either because it has reached the end of Stdin - * (EOF or a read error), or because writing to the pipe returned an error, - * or because a nonzero WaitDelay was set and expired. - */ - stdin: io.Reader - /** - * Stdout and Stderr specify the process's standard output and error. - * - * If either is nil, Run connects the corresponding file descriptor - * to the null device (os.DevNull). - * - * If either is an *os.File, the corresponding output from the process - * is connected directly to that file. - * - * Otherwise, during the execution of the command a separate goroutine - * reads from the process over a pipe and delivers that data to the - * corresponding Writer. In this case, Wait does not complete until the - * goroutine reaches EOF or encounters an error or a nonzero WaitDelay - * expires. - * - * If Stdout and Stderr are the same writer, and have a type that can - * be compared with ==, at most one goroutine at a time will call Write. - */ - stdout: io.Writer - stderr: io.Writer - /** - * ExtraFiles specifies additional open files to be inherited by the - * new process. It does not include standard input, standard output, or - * standard error. If non-nil, entry i becomes file descriptor 3+i. - * - * ExtraFiles is not supported on Windows. - */ - extraFiles: Array<(os.File | undefined)> - /** - * SysProcAttr holds optional, operating system-specific attributes. - * Run passes it to os.StartProcess as the os.ProcAttr's Sys field. - */ - sysProcAttr?: syscall.SysProcAttr - /** - * Process is the underlying process, once started. - */ - process?: os.Process - /** - * ProcessState contains information about an exited process. - * If the process was started successfully, Wait or Run will - * populate its ProcessState when the command completes. - */ - processState?: os.ProcessState - err: Error // LookPath error, if any. - /** - * If Cancel is non-nil, the command must have been created with - * CommandContext and Cancel will be called when the command's - * Context is done. By default, CommandContext sets Cancel to - * call the Kill method on the command's Process. 
- * - * Typically a custom Cancel will send a signal to the command's - * Process, but it may instead take other actions to initiate cancellation, - * such as closing a stdin or stdout pipe or sending a shutdown request on a - * network socket. - * - * If the command exits with a success status after Cancel is - * called, and Cancel does not return an error equivalent to - * os.ErrProcessDone, then Wait and similar methods will return a non-nil - * error: either an error wrapping the one returned by Cancel, - * or the error from the Context. - * (If the command exits with a non-success status, or Cancel - * returns an error that wraps os.ErrProcessDone, Wait and similar methods - * continue to return the command's usual exit status.) - * - * If Cancel is set to nil, nothing will happen immediately when the command's - * Context is done, but a nonzero WaitDelay will still take effect. That may - * be useful, for example, to work around deadlocks in commands that do not - * support shutdown signals but are expected to always finish quickly. - * - * Cancel will not be called if Start returns a non-nil error. - */ - cancel: () => void - /** - * If WaitDelay is non-zero, it bounds the time spent waiting on two sources - * of unexpected delay in Wait: a child process that fails to exit after the - * associated Context is canceled, and a child process that exits but leaves - * its I/O pipes unclosed. - * - * The WaitDelay timer starts when either the associated Context is done or a - * call to Wait observes that the child process has exited, whichever occurs - * first. When the delay has elapsed, the command shuts down the child process - * and/or its I/O pipes. - * - * If the child process has failed to exit — perhaps because it ignored or - * failed to receive a shutdown signal from a Cancel function, or because no - * Cancel function was set — then it will be terminated using os.Process.Kill. - * - * Then, if the I/O pipes communicating with the child process are still open, - * those pipes are closed in order to unblock any goroutines currently blocked - * on Read or Write calls. - * - * If pipes are closed due to WaitDelay, no Cancel call has occurred, - * and the command has otherwise exited with a successful status, Wait and - * similar methods will return ErrWaitDelay instead of nil. - * - * If WaitDelay is zero (the default), I/O pipes will be read until EOF, - * which might not occur until orphaned subprocesses of the command have - * also closed their descriptors for the pipes. - */ - waitDelay: time.Duration - } - interface Cmd { - /** - * String returns a human-readable description of c. - * It is intended only for debugging. - * In particular, it is not suitable for use as input to a shell. - * The output of String may vary across Go releases. - */ - string(): string - } - interface Cmd { - /** - * Run starts the specified command and waits for it to complete. - * - * The returned error is nil if the command runs, has no problems - * copying stdin, stdout, and stderr, and exits with a zero exit - * status. - * - * If the command starts but does not complete successfully, the error is of - * type [*ExitError]. Other error types may be returned for other situations. - * - * If the calling goroutine has locked the operating system thread - * with [runtime.LockOSThread] and modified any inheritable OS-level - * thread state (for example, Linux or Plan 9 name spaces), the new - * process will inherit the caller's thread state. 
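- *
- * A minimal usage sketch (illustrative only; how the Cmd value is constructed
- * depends on the host environment that exposes this namespace):
- *
- * ```
- * cmd.dir = "/tmp"
- * cmd.env = ["LANG=C"]
- * cmd.run()
- * ```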
- */ - run(): void - } - interface Cmd { - /** - * Start starts the specified command but does not wait for it to complete. - * - * If Start returns successfully, the c.Process field will be set. - * - * After a successful call to Start the [Cmd.Wait] method must be called in - * order to release associated system resources. - */ - start(): void - } - interface Cmd { - /** - * Wait waits for the command to exit and waits for any copying to - * stdin or copying from stdout or stderr to complete. - * - * The command must have been started by [Cmd.Start]. - * - * The returned error is nil if the command runs, has no problems - * copying stdin, stdout, and stderr, and exits with a zero exit - * status. - * - * If the command fails to run or doesn't complete successfully, the - * error is of type [*ExitError]. Other error types may be - * returned for I/O problems. - * - * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits - * for the respective I/O loop copying to or from the process to complete. - * - * Wait releases any resources associated with the [Cmd]. - */ - wait(): void - } - interface Cmd { - /** - * Output runs the command and returns its standard output. - * Any returned error will usually be of type [*ExitError]. - * If c.Stderr was nil, Output populates [ExitError.Stderr]. - */ - output(): string|Array - } - interface Cmd { - /** - * CombinedOutput runs the command and returns its combined standard - * output and standard error. - */ - combinedOutput(): string|Array - } - interface Cmd { - /** - * StdinPipe returns a pipe that will be connected to the command's - * standard input when the command starts. - * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. - * A caller need only call Close to force the pipe to close sooner. - * For example, if the command being run will not exit until standard input - * is closed, the caller must close the pipe. - */ - stdinPipe(): io.WriteCloser - } - interface Cmd { - /** - * StdoutPipe returns a pipe that will be connected to the command's - * standard output when the command starts. - * - * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. - * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. - * See the example for idiomatic usage. - */ - stdoutPipe(): io.ReadCloser - } - interface Cmd { - /** - * StderrPipe returns a pipe that will be connected to the command's - * standard error when the command starts. - * - * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. - * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. - * See the StdoutPipe example for idiomatic usage. - */ - stderrPipe(): io.ReadCloser - } - interface Cmd { - /** - * Environ returns a copy of the environment in which the command would be run - * as it is currently configured. - */ - environ(): Array + value(key: any): any } } @@ -9308,1144 +8269,6 @@ namespace jwt { } } -/** - * Package multipart implements MIME multipart parsing, as defined in RFC - * 2046. - * - * The implementation is sufficient for HTTP (RFC 2388) and the multipart - * bodies generated by popular browsers. 
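- *
- * A minimal sketch (illustrative only; `fh` is assumed to be a FileHeader taken
- * from an already parsed multipart form):
- *
- * ```
- * const f = fh.open()
- * // ... read from f and close it when done ...
- * ```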
- * - * # Limits - * - * To protect against malicious inputs, this package sets limits on the size - * of the MIME data it processes. - * - * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a - * part to 10000 and [Reader.ReadForm] limits the total number of headers in all - * FileHeaders to 10000. - * These limits may be adjusted with the GODEBUG=multipartmaxheaders= - * setting. - * - * Reader.ReadForm further limits the number of parts in a form to 1000. - * This limit may be adjusted with the GODEBUG=multipartmaxparts= - * setting. - */ -namespace multipart { - /** - * A FileHeader describes a file part of a multipart request. - */ - interface FileHeader { - filename: string - header: textproto.MIMEHeader - size: number - } - interface FileHeader { - /** - * Open opens and returns the [FileHeader]'s associated File. - */ - open(): File - } -} - -/** - * Package http provides HTTP client and server implementations. - * - * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: - * - * ``` - * resp, err := http.Get("http://example.com/") - * ... - * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) - * ... - * resp, err := http.PostForm("http://example.com/form", - * url.Values{"key": {"Value"}, "id": {"123"}}) - * ``` - * - * The caller must close the response body when finished with it: - * - * ``` - * resp, err := http.Get("http://example.com/") - * if err != nil { - * // handle error - * } - * defer resp.Body.Close() - * body, err := io.ReadAll(resp.Body) - * // ... - * ``` - * - * # Clients and Transports - * - * For control over HTTP client headers, redirect policy, and other - * settings, create a [Client]: - * - * ``` - * client := &http.Client{ - * CheckRedirect: redirectPolicyFunc, - * } - * - * resp, err := client.Get("http://example.com") - * // ... - * - * req, err := http.NewRequest("GET", "http://example.com", nil) - * // ... - * req.Header.Add("If-None-Match", `W/"wyzzy"`) - * resp, err := client.Do(req) - * // ... - * ``` - * - * For control over proxies, TLS configuration, keep-alives, - * compression, and other settings, create a [Transport]: - * - * ``` - * tr := &http.Transport{ - * MaxIdleConns: 10, - * IdleConnTimeout: 30 * time.Second, - * DisableCompression: true, - * } - * client := &http.Client{Transport: tr} - * resp, err := client.Get("https://example.com") - * ``` - * - * Clients and Transports are safe for concurrent use by multiple - * goroutines and for efficiency should only be created once and re-used. - * - * # Servers - * - * ListenAndServe starts an HTTP server with a given address and handler. - * The handler is usually nil, which means to use [DefaultServeMux]. - * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: - * - * ``` - * http.Handle("/foo", fooHandler) - * - * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { - * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) - * }) - * - * log.Fatal(http.ListenAndServe(":8080", nil)) - * ``` - * - * More control over the server's behavior is available by creating a - * custom Server: - * - * ``` - * s := &http.Server{ - * Addr: ":8080", - * Handler: myHandler, - * ReadTimeout: 10 * time.Second, - * WriteTimeout: 10 * time.Second, - * MaxHeaderBytes: 1 << 20, - * } - * log.Fatal(s.ListenAndServe()) - * ``` - * - * # HTTP/2 - * - * Starting with Go 1.6, the http package has transparent support for the - * HTTP/2 protocol when using HTTPS. 
Programs that must disable HTTP/2 - * can do so by setting [Transport.TLSNextProto] (for clients) or - * [Server.TLSNextProto] (for servers) to a non-nil, empty - * map. Alternatively, the following GODEBUG settings are - * currently supported: - * - * ``` - * GODEBUG=http2client=0 # disable HTTP/2 client support - * GODEBUG=http2server=0 # disable HTTP/2 server support - * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs - * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps - * ``` - * - * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - * - * The http package's [Transport] and [Server] both automatically enable - * HTTP/2 support for simple configurations. To enable HTTP/2 for more - * complex configurations, to use lower-level HTTP/2 features, or to use - * a newer version of Go's http2 package, import "golang.org/x/net/http2" - * directly and use its ConfigureTransport and/or ConfigureServer - * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 - * package takes precedence over the net/http package's built-in HTTP/2 - * support. - */ -namespace http { - // @ts-ignore - import mathrand = rand - /** - * PushOptions describes options for [Pusher.Push]. - */ - interface PushOptions { - /** - * Method specifies the HTTP method for the promised request. - * If set, it must be "GET" or "HEAD". Empty means "GET". - */ - method: string - /** - * Header specifies additional promised request headers. This cannot - * include HTTP/2 pseudo header fields like ":path" and ":scheme", - * which will be added automatically. - */ - header: Header - } - // @ts-ignore - import urlpkg = url - /** - * A Request represents an HTTP request received by a server - * or to be sent by a client. - * - * The field semantics differ slightly between client and server - * usage. In addition to the notes on the fields below, see the - * documentation for [Request.Write] and [RoundTripper]. - */ - interface Request { - /** - * Method specifies the HTTP method (GET, POST, PUT, etc.). - * For client requests, an empty string means GET. - */ - method: string - /** - * URL specifies either the URI being requested (for server - * requests) or the URL to access (for client requests). - * - * For server requests, the URL is parsed from the URI - * supplied on the Request-Line as stored in RequestURI. For - * most requests, fields other than Path and RawQuery will be - * empty. (See RFC 7230, Section 5.3) - * - * For client requests, the URL's Host specifies the server to - * connect to, while the Request's Host field optionally - * specifies the Host header value to send in the HTTP - * request. - */ - url?: url.URL - /** - * The protocol version for incoming server requests. - * - * For client requests, these fields are ignored. The HTTP - * client code always uses either HTTP/1.1 or HTTP/2. - * See the docs on Transport for details. - */ - proto: string // "HTTP/1.0" - protoMajor: number // 1 - protoMinor: number // 0 - /** - * Header contains the request header fields either received - * by the server or to be sent by the client. 
- * - * If a server received a request with header lines, - * - * ``` - * Host: example.com - * accept-encoding: gzip, deflate - * Accept-Language: en-us - * fOO: Bar - * foo: two - * ``` - * - * then - * - * ``` - * Header = map[string][]string{ - * "Accept-Encoding": {"gzip, deflate"}, - * "Accept-Language": {"en-us"}, - * "Foo": {"Bar", "two"}, - * } - * ``` - * - * For incoming requests, the Host header is promoted to the - * Request.Host field and removed from the Header map. - * - * HTTP defines that header names are case-insensitive. The - * request parser implements this by using CanonicalHeaderKey, - * making the first character and any characters following a - * hyphen uppercase and the rest lowercase. - * - * For client requests, certain headers such as Content-Length - * and Connection are automatically written when needed and - * values in Header may be ignored. See the documentation - * for the Request.Write method. - */ - header: Header - /** - * Body is the request's body. - * - * For client requests, a nil body means the request has no - * body, such as a GET request. The HTTP Client's Transport - * is responsible for calling the Close method. - * - * For server requests, the Request Body is always non-nil - * but will return EOF immediately when no body is present. - * The Server will close the request body. The ServeHTTP - * Handler does not need to. - * - * Body must allow Read to be called concurrently with Close. - * In particular, calling Close should unblock a Read waiting - * for input. - */ - body: io.ReadCloser - /** - * GetBody defines an optional func to return a new copy of - * Body. It is used for client requests when a redirect requires - * reading the body more than once. Use of GetBody still - * requires setting Body. - * - * For server requests, it is unused. - */ - getBody: () => io.ReadCloser - /** - * ContentLength records the length of the associated content. - * The value -1 indicates that the length is unknown. - * Values >= 0 indicate that the given number of bytes may - * be read from Body. - * - * For client requests, a value of 0 with a non-nil Body is - * also treated as unknown. - */ - contentLength: number - /** - * TransferEncoding lists the transfer encodings from outermost to - * innermost. An empty list denotes the "identity" encoding. - * TransferEncoding can usually be ignored; chunked encoding is - * automatically added and removed as necessary when sending and - * receiving requests. - */ - transferEncoding: Array - /** - * Close indicates whether to close the connection after - * replying to this request (for servers) or after sending this - * request and reading its response (for clients). - * - * For server requests, the HTTP server handles this automatically - * and this field is not needed by Handlers. - * - * For client requests, setting this field prevents re-use of - * TCP connections between requests to the same hosts, as if - * Transport.DisableKeepAlives were set. - */ - close: boolean - /** - * For server requests, Host specifies the host on which the - * URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this - * is either the value of the "Host" header or the host name - * given in the URL itself. For HTTP/2, it is the value of the - * ":authority" pseudo-header field. - * It may be of the form "host:port". For international domain - * names, Host may be in Punycode or Unicode form. Use - * golang.org/x/net/idna to convert it to either format if - * needed. 
- * To prevent DNS rebinding attacks, server Handlers should - * validate that the Host header has a value for which the - * Handler considers itself authoritative. The included - * ServeMux supports patterns registered to particular host - * names and thus protects its registered Handlers. - * - * For client requests, Host optionally overrides the Host - * header to send. If empty, the Request.Write method uses - * the value of URL.Host. Host may contain an international - * domain name. - */ - host: string - /** - * Form contains the parsed form data, including both the URL - * field's query parameters and the PATCH, POST, or PUT form data. - * This field is only available after ParseForm is called. - * The HTTP client ignores Form and uses Body instead. - */ - form: url.Values - /** - * PostForm contains the parsed form data from PATCH, POST - * or PUT body parameters. - * - * This field is only available after ParseForm is called. - * The HTTP client ignores PostForm and uses Body instead. - */ - postForm: url.Values - /** - * MultipartForm is the parsed multipart form, including file uploads. - * This field is only available after ParseMultipartForm is called. - * The HTTP client ignores MultipartForm and uses Body instead. - */ - multipartForm?: multipart.Form - /** - * Trailer specifies additional headers that are sent after the request - * body. - * - * For server requests, the Trailer map initially contains only the - * trailer keys, with nil values. (The client declares which trailers it - * will later send.) While the handler is reading from Body, it must - * not reference Trailer. After reading from Body returns EOF, Trailer - * can be read again and will contain non-nil values, if they were sent - * by the client. - * - * For client requests, Trailer must be initialized to a map containing - * the trailer keys to later send. The values may be nil or their final - * values. The ContentLength must be 0 or -1, to send a chunked request. - * After the HTTP request is sent the map values can be updated while - * the request body is read. Once the body returns EOF, the caller must - * not mutate Trailer. - * - * Few HTTP clients, servers, or proxies support HTTP trailers. - */ - trailer: Header - /** - * RemoteAddr allows HTTP servers and other software to record - * the network address that sent the request, usually for - * logging. This field is not filled in by ReadRequest and - * has no defined format. The HTTP server in this package - * sets RemoteAddr to an "IP:port" address before invoking a - * handler. - * This field is ignored by the HTTP client. - */ - remoteAddr: string - /** - * RequestURI is the unmodified request-target of the - * Request-Line (RFC 7230, Section 3.1.1) as sent by the client - * to a server. Usually the URL field should be used instead. - * It is an error to set this field in an HTTP client request. - */ - requestURI: string - /** - * TLS allows HTTP servers and other software to record - * information about the TLS connection on which the request - * was received. This field is not filled in by ReadRequest. - * The HTTP server in this package sets the field for - * TLS-enabled connections before invoking a handler; - * otherwise it leaves the field nil. - * This field is ignored by the HTTP client. - */ - tls?: any - /** - * Cancel is an optional channel whose closure indicates that the client - * request should be regarded as canceled. Not all implementations of - * RoundTripper may support Cancel. 
- * - * For server requests, this field is not applicable. - * - * Deprecated: Set the Request's context with NewRequestWithContext - * instead. If a Request's Cancel field and context are both - * set, it is undefined whether Cancel is respected. - */ - cancel: undefined - /** - * Response is the redirect response which caused this request - * to be created. This field is only populated during client - * redirects. - */ - response?: Response - /** - * Pattern is the [ServeMux] pattern that matched the request. - * It is empty if the request was not matched against a pattern. - */ - pattern: string - } - interface Request { - /** - * Context returns the request's context. To change the context, use - * [Request.Clone] or [Request.WithContext]. - * - * The returned context is always non-nil; it defaults to the - * background context. - * - * For outgoing client requests, the context controls cancellation. - * - * For incoming server requests, the context is canceled when the - * client's connection closes, the request is canceled (with HTTP/2), - * or when the ServeHTTP method returns. - */ - context(): context.Context - } - interface Request { - /** - * WithContext returns a shallow copy of r with its context changed - * to ctx. The provided ctx must be non-nil. - * - * For outgoing client request, the context controls the entire - * lifetime of a request and its response: obtaining a connection, - * sending the request, and reading the response headers and body. - * - * To create a new request with a context, use [NewRequestWithContext]. - * To make a deep copy of a request with a new context, use [Request.Clone]. - */ - withContext(ctx: context.Context): (Request) - } - interface Request { - /** - * Clone returns a deep copy of r with its context changed to ctx. - * The provided ctx must be non-nil. - * - * Clone only makes a shallow copy of the Body field. - * - * For an outgoing client request, the context controls the entire - * lifetime of a request and its response: obtaining a connection, - * sending the request, and reading the response headers and body. - */ - clone(ctx: context.Context): (Request) - } - interface Request { - /** - * ProtoAtLeast reports whether the HTTP protocol used - * in the request is at least major.minor. - */ - protoAtLeast(major: number, minor: number): boolean - } - interface Request { - /** - * UserAgent returns the client's User-Agent, if sent in the request. - */ - userAgent(): string - } - interface Request { - /** - * Cookies parses and returns the HTTP cookies sent with the request. - */ - cookies(): Array<(Cookie | undefined)> - } - interface Request { - /** - * CookiesNamed parses and returns the named HTTP cookies sent with the request - * or an empty slice if none matched. - */ - cookiesNamed(name: string): Array<(Cookie | undefined)> - } - interface Request { - /** - * Cookie returns the named cookie provided in the request or - * [ErrNoCookie] if not found. - * If multiple cookies match the given name, only one cookie will - * be returned. - */ - cookie(name: string): (Cookie) - } - interface Request { - /** - * AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, - * AddCookie does not attach more than one [Cookie] header field. That - * means all cookies, if any, are written into the same line, - * separated by semicolon. - * AddCookie only sanitizes c's name and value, and does not sanitize - * a Cookie header already present in the request. 
- */ - addCookie(c: Cookie): void - } - interface Request { - /** - * Referer returns the referring URL, if sent in the request. - * - * Referer is misspelled as in the request itself, a mistake from the - * earliest days of HTTP. This value can also be fetched from the - * [Header] map as Header["Referer"]; the benefit of making it available - * as a method is that the compiler can diagnose programs that use the - * alternate (correct English) spelling req.Referrer() but cannot - * diagnose programs that use Header["Referrer"]. - */ - referer(): string - } - interface Request { - /** - * MultipartReader returns a MIME multipart reader if this is a - * multipart/form-data or a multipart/mixed POST request, else returns nil and an error. - * Use this function instead of [Request.ParseMultipartForm] to - * process the request body as a stream. - */ - multipartReader(): (multipart.Reader) - } - interface Request { - /** - * Write writes an HTTP/1.1 request, which is the header and body, in wire format. - * This method consults the following fields of the request: - * - * ``` - * Host - * URL - * Method (defaults to "GET") - * Header - * ContentLength - * TransferEncoding - * Body - * ``` - * - * If Body is present, Content-Length is <= 0 and [Request.TransferEncoding] - * hasn't been set to "identity", Write adds "Transfer-Encoding: - * chunked" to the header. Body is closed after it is sent. - */ - write(w: io.Writer): void - } - interface Request { - /** - * WriteProxy is like [Request.Write] but writes the request in the form - * expected by an HTTP proxy. In particular, [Request.WriteProxy] writes the - * initial Request-URI line of the request with an absolute URI, per - * section 5.3 of RFC 7230, including the scheme and host. - * In either case, WriteProxy also writes a Host header, using - * either r.Host or r.URL.Host. - */ - writeProxy(w: io.Writer): void - } - interface Request { - /** - * BasicAuth returns the username and password provided in the request's - * Authorization header, if the request uses HTTP Basic Authentication. - * See RFC 2617, Section 2. - */ - basicAuth(): [string, boolean] - } - interface Request { - /** - * SetBasicAuth sets the request's Authorization header to use HTTP - * Basic Authentication with the provided username and password. - * - * With HTTP Basic Authentication the provided username and password - * are not encrypted. It should generally only be used in an HTTPS - * request. - * - * The username may not contain a colon. Some protocols may impose - * additional requirements on pre-escaping the username and - * password. For instance, when used with OAuth2, both arguments must - * be URL encoded first with [url.QueryEscape]. - */ - setBasicAuth(username: string, password: string): void - } - interface Request { - /** - * ParseForm populates r.Form and r.PostForm. - * - * For all requests, ParseForm parses the raw query from the URL and updates - * r.Form. - * - * For POST, PUT, and PATCH requests, it also reads the request body, parses it - * as a form and puts the results into both r.PostForm and r.Form. Request body - * parameters take precedence over URL query string values in r.Form. - * - * If the request Body's size has not already been limited by [MaxBytesReader], - * the size is capped at 10MB. - * - * For other HTTP methods, or when the Content-Type is not - * application/x-www-form-urlencoded, the request Body is not read, and - * r.PostForm is initialized to a non-nil, empty value. 
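- *
- * A minimal usage sketch (illustrative only; `req` is assumed to be an incoming
- * server Request):
- *
- * ```
- * req.parseForm()
- * const name = req.formValue("name") // see FormValue further below
- * ```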
- * - * [Request.ParseMultipartForm] calls ParseForm automatically. - * ParseForm is idempotent. - */ - parseForm(): void - } - interface Request { - /** - * ParseMultipartForm parses a request body as multipart/form-data. - * The whole request body is parsed and up to a total of maxMemory bytes of - * its file parts are stored in memory, with the remainder stored on - * disk in temporary files. - * ParseMultipartForm calls [Request.ParseForm] if necessary. - * If ParseForm returns an error, ParseMultipartForm returns it but also - * continues parsing the request body. - * After one call to ParseMultipartForm, subsequent calls have no effect. - */ - parseMultipartForm(maxMemory: number): void - } - interface Request { - /** - * FormValue returns the first value for the named component of the query. - * The precedence order: - * 1. application/x-www-form-urlencoded form body (POST, PUT, PATCH only) - * 2. query parameters (always) - * 3. multipart/form-data form body (always) - * - * FormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] - * if necessary and ignores any errors returned by these functions. - * If key is not present, FormValue returns the empty string. - * To access multiple values of the same key, call ParseForm and - * then inspect [Request.Form] directly. - */ - formValue(key: string): string - } - interface Request { - /** - * PostFormValue returns the first value for the named component of the POST, - * PUT, or PATCH request body. URL query parameters are ignored. - * PostFormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary and ignores - * any errors returned by these functions. - * If key is not present, PostFormValue returns the empty string. - */ - postFormValue(key: string): string - } - interface Request { - /** - * FormFile returns the first file for the provided form key. - * FormFile calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary. - */ - formFile(key: string): [multipart.File, (multipart.FileHeader)] - } - interface Request { - /** - * PathValue returns the value for the named path wildcard in the [ServeMux] pattern - * that matched the request. - * It returns the empty string if the request was not matched against a pattern - * or there is no such wildcard in the pattern. - */ - pathValue(name: string): string - } - interface Request { - /** - * SetPathValue sets name to value, so that subsequent calls to r.PathValue(name) - * return value. - */ - setPathValue(name: string, value: string): void - } - /** - * A Handler responds to an HTTP request. - * - * [Handler.ServeHTTP] should write reply headers and data to the [ResponseWriter] - * and then return. Returning signals that the request is finished; it - * is not valid to use the [ResponseWriter] or read from the - * [Request.Body] after or concurrently with the completion of the - * ServeHTTP call. - * - * Depending on the HTTP client software, HTTP protocol version, and - * any intermediaries between the client and the Go server, it may not - * be possible to read from the [Request.Body] after writing to the - * [ResponseWriter]. Cautious handlers should read the [Request.Body] - * first, and then reply. - * - * Except for reading the body, handlers should not modify the - * provided Request. - * - * If ServeHTTP panics, the server (the caller of ServeHTTP) assumes - * that the effect of the panic was isolated to the active request. 
- * It recovers the panic, logs a stack trace to the server error log, - * and either closes the network connection or sends an HTTP/2 - * RST_STREAM, depending on the HTTP protocol. To abort a handler so - * the client sees an interrupted response but the server doesn't log - * an error, panic with the value [ErrAbortHandler]. - */ - interface Handler { - [key:string]: any; - serveHTTP(_arg0: ResponseWriter, _arg1: Request): void - } - /** - * A ResponseWriter interface is used by an HTTP handler to - * construct an HTTP response. - * - * A ResponseWriter may not be used after [Handler.ServeHTTP] has returned. - */ - interface ResponseWriter { - [key:string]: any; - /** - * Header returns the header map that will be sent by - * [ResponseWriter.WriteHeader]. The [Header] map also is the mechanism with which - * [Handler] implementations can set HTTP trailers. - * - * Changing the header map after a call to [ResponseWriter.WriteHeader] (or - * [ResponseWriter.Write]) has no effect unless the HTTP status code was of the - * 1xx class or the modified headers are trailers. - * - * There are two ways to set Trailers. The preferred way is to - * predeclare in the headers which trailers you will later - * send by setting the "Trailer" header to the names of the - * trailer keys which will come later. In this case, those - * keys of the Header map are treated as if they were - * trailers. See the example. The second way, for trailer - * keys not known to the [Handler] until after the first [ResponseWriter.Write], - * is to prefix the [Header] map keys with the [TrailerPrefix] - * constant value. - * - * To suppress automatic response headers (such as "Date"), set - * their value to nil. - */ - header(): Header - /** - * Write writes the data to the connection as part of an HTTP reply. - * - * If [ResponseWriter.WriteHeader] has not yet been called, Write calls - * WriteHeader(http.StatusOK) before writing the data. If the Header - * does not contain a Content-Type line, Write adds a Content-Type set - * to the result of passing the initial 512 bytes of written data to - * [DetectContentType]. Additionally, if the total size of all written - * data is under a few KB and there are no Flush calls, the - * Content-Length header is added automatically. - * - * Depending on the HTTP protocol version and the client, calling - * Write or WriteHeader may prevent future reads on the - * Request.Body. For HTTP/1.x requests, handlers should read any - * needed request body data before writing the response. Once the - * headers have been flushed (due to either an explicit Flusher.Flush - * call or writing enough data to trigger a flush), the request body - * may be unavailable. For HTTP/2 requests, the Go HTTP server permits - * handlers to continue to read the request body while concurrently - * writing the response. However, such behavior may not be supported - * by all HTTP/2 clients. Handlers should read before writing if - * possible to maximize compatibility. - */ - write(_arg0: string|Array): number - /** - * WriteHeader sends an HTTP response header with the provided - * status code. - * - * If WriteHeader is not called explicitly, the first call to Write - * will trigger an implicit WriteHeader(http.StatusOK). - * Thus explicit calls to WriteHeader are mainly used to - * send error codes or 1xx informational responses. - * - * The provided code must be a valid HTTP 1xx-5xx status code. - * Any number of 1xx headers may be written, followed by at most - * one 2xx-5xx header. 
1xx headers are sent immediately, but 2xx-5xx - * headers may be buffered. Use the Flusher interface to send - * buffered data. The header map is cleared when 2xx-5xx headers are - * sent, but not with 1xx headers. - * - * The server will automatically send a 100 (Continue) header - * on the first read from the request body if the request has - * an "Expect: 100-continue" header. - */ - writeHeader(statusCode: number): void - } -} - -/** - * Package blob provides an easy and portable way to interact with blobs - * within a storage location. Subpackages contain driver implementations of - * blob for supported services. - * - * See https://gocloud.dev/howto/blob/ for a detailed how-to guide. - * - * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with - * functions in that package. - * - * # Errors - * - * The errors returned from this package can be inspected in several ways: - * - * The Code function from gocloud.dev/gcerrors will return an error code, also - * defined in that package, when invoked on an error. - * - * The Bucket.ErrorAs method can retrieve the driver error underlying the returned - * error. - * - * # OpenCensus Integration - * - * OpenCensus supports tracing and metric collection for multiple languages and - * backend providers. See https://opencensus.io. - * - * This API collects OpenCensus traces and metrics for the following methods: - * ``` - * - Attributes - * - Copy - * - Delete - * - ListPage - * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll - * are included because they call NewRangeReader.) - * - NewWriter, from creation until the call to Close. - * ``` - * - * All trace and metric names begin with the package import path. - * The traces add the method name. - * For example, "gocloud.dev/blob/Attributes". - * The metrics are "completed_calls", a count of completed method calls by driver, - * method and status (error code); and "latency", a distribution of method latency - * by driver and method. - * For example, "gocloud.dev/blob/latency". - * - * It also collects the following metrics: - * ``` - * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver. - * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver. - * ``` - * - * To enable trace collection in your application, see "Configure Exporter" at - * https://opencensus.io/quickstart/go/tracing. - * To enable metric collection in your application, see "Exporting stats" at - * https://opencensus.io/quickstart/go/metrics. - */ -namespace blob { - /** - * Reader reads bytes from a blob. - * It implements io.ReadSeekCloser, and must be closed after - * reads are finished. - */ - interface Reader { - } - interface Reader { - /** - * Read implements io.Reader (https://golang.org/pkg/io/#Reader). - */ - read(p: string|Array): number - } - interface Reader { - /** - * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). - */ - seek(offset: number, whence: number): number - } - interface Reader { - /** - * Close implements io.Closer (https://golang.org/pkg/io/#Closer). - */ - close(): void - } - interface Reader { - /** - * ContentType returns the MIME type of the blob. - */ - contentType(): string - } - interface Reader { - /** - * ModTime returns the time the blob was last modified. - */ - modTime(): time.Time - } - interface Reader { - /** - * Size returns the size of the blob content in bytes. - */ - size(): number - } - interface Reader { - /** - * As converts i to driver-specific types. 
- * See https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: { - }): boolean - } - interface Reader { - /** - * WriteTo reads from r and writes to w until there's no more data or - * an error occurs. - * The return value is the number of bytes written to w. - * - * It implements the io.WriterTo interface. - */ - writeTo(w: io.Writer): number - } - /** - * Attributes contains attributes about a blob. - */ - interface Attributes { - /** - * CacheControl specifies caching attributes that services may use - * when serving the blob. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control - */ - cacheControl: string - /** - * ContentDisposition specifies whether the blob content is expected to be - * displayed inline or as an attachment. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition - */ - contentDisposition: string - /** - * ContentEncoding specifies the encoding used for the blob's content, if any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding - */ - contentEncoding: string - /** - * ContentLanguage specifies the language used in the blob's content, if any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language - */ - contentLanguage: string - /** - * ContentType is the MIME type of the blob. It will not be empty. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type - */ - contentType: string - /** - * Metadata holds key/value pairs associated with the blob. - * Keys are guaranteed to be in lowercase, even if the backend service - * has case-sensitive keys (although note that Metadata written via - * this package will always be lowercased). If there are duplicate - * case-insensitive keys (e.g., "foo" and "FOO"), only one value - * will be kept, and it is undefined which one. - */ - metadata: _TygojaDict - /** - * CreateTime is the time the blob was created, if available. If not available, - * CreateTime will be the zero time. - */ - createTime: time.Time - /** - * ModTime is the time the blob was last modified. - */ - modTime: time.Time - /** - * Size is the size of the blob's content in bytes. - */ - size: number - /** - * MD5 is an MD5 hash of the blob contents or nil if not available. - */ - md5: string|Array - /** - * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. - */ - eTag: string - } - interface Attributes { - /** - * As converts i to driver-specific types. - * See https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: { - }): boolean - } - /** - * ListObject represents a single blob returned from List. - */ - interface ListObject { - /** - * Key is the key for this blob. - */ - key: string - /** - * ModTime is the time the blob was last modified. - */ - modTime: time.Time - /** - * Size is the size of the blob's content in bytes. - */ - size: number - /** - * MD5 is an MD5 hash of the blob contents or nil if not available. - */ - md5: string|Array - /** - * IsDir indicates that this result represents a "directory" in the - * hierarchical namespace, ending in ListOptions.Delimiter. Key can be - * passed as ListOptions.Prefix to list items in the "directory". 
- * Fields other than Key and IsDir will not be set if IsDir is true. - */ - isDir: boolean - } - interface ListObject { - /** - * As converts i to driver-specific types. - * See https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: { - }): boolean - } -} - -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { - /** - * DateTime represents a [time.Time] instance in UTC that is wrapped - * and serialized using the app default date layout. - */ - interface DateTime { - } - interface DateTime { - /** - * Time returns the internal [time.Time] instance. - */ - time(): time.Time - } - interface DateTime { - /** - * Add returns a new DateTime based on the current DateTime + the specified duration. - */ - add(duration: time.Duration): DateTime - } - interface DateTime { - /** - * Sub returns a [time.Duration] by substracting the specified DateTime from the current one. - * - * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], - * the maximum (or minimum) duration will be returned. - */ - sub(u: DateTime): time.Duration - } - interface DateTime { - /** - * AddDate returns a new DateTime based on the current one + duration. - * - * It follows the same rules as [time.AddDate]. - */ - addDate(years: number, months: number, days: number): DateTime - } - interface DateTime { - /** - * After reports whether the current DateTime instance is after u. - */ - after(u: DateTime): boolean - } - interface DateTime { - /** - * Before reports whether the current DateTime instance is before u. - */ - before(u: DateTime): boolean - } - interface DateTime { - /** - * Compare compares the current DateTime instance with u. - * If the current instance is before u, it returns -1. - * If the current instance is after u, it returns +1. - * If they're the same, it returns 0. - */ - compare(u: DateTime): number - } - interface DateTime { - /** - * Equal reports whether the current DateTime and u represent the same time instant. - * Two DateTime can be equal even if they are in different locations. - * For example, 6:00 +0200 and 4:00 UTC are Equal. - */ - equal(u: DateTime): boolean - } - interface DateTime { - /** - * Unix returns the current DateTime as a Unix time, aka. - * the number of seconds elapsed since January 1, 1970 UTC. - */ - unix(): number - } - interface DateTime { - /** - * IsZero checks whether the current DateTime instance has zero time value. - */ - isZero(): boolean - } - interface DateTime { - /** - * String serializes the current DateTime instance into a formatted - * UTC date string. - * - * The zero value is serialized to an empty string. - */ - string(): string - } - interface DateTime { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface DateTime { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface DateTime { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface DateTime { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current DateTime instance. - */ - scan(value: any): void - } -} - namespace hook { /** * HandlerFunc defines a hook handler function. 
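For reference, a minimal sketch of how the DateTime helpers documented above combine, assuming `dt` already holds a `types.DateTime` value (e.g. read from a record's "created" field) and that the JSVM runtime provides `console.log`:

```
// illustrative only – `dt` is assumed to be an existing types.DateTime value
if (!dt.isZero()) {
    const nextWeek = dt.addDate(0, 0, 7); // follows the same rules as time.AddDate
    console.log(dt.string());             // formatted UTC date string
    console.log(nextWeek.after(dt));      // true
    console.log(dt.sub(nextWeek));        // time.Duration between the two values
}
```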
@@ -10474,8 +8297,7 @@ namespace hook { */ id: string /** - * Priority allows changing the default exec priority of the handler - * withing a hook. + * Priority allows changing the default exec priority of the handler within a hook. * * If 0, the handler will be executed in the same order it was registered. */ @@ -10483,2156 +8305,18 @@ namespace hook { } } -namespace router { - // @ts-ignore - import validation = ozzo_validation - /** - * ApiError defines the struct for a basic api error response. - */ - interface ApiError { - data: _TygojaDict - message: string - status: number - } - interface ApiError { - /** - * Error makes it compatible with the `error` interface. - */ - error(): string - } - interface ApiError { - /** - * RawData returns the unformatted error data (could be an internal error, text, etc.) - */ - rawData(): any - } - interface ApiError { - /** - * Is reports whether the current ApiError wraps the target. - */ - is(target: Error): boolean - } - /** - * Router defines a thin wrapper around the standard Go [http.ServeMux] by - * adding support for routing sub-groups, middlewares and other common utils. - * - * Example: - * - * ``` - * r := NewRouter[*MyEvent](eventFactory) - * - * // middlewares - * r.BindFunc(m1, m2) - * - * // routes - * r.GET("/test", handler1) - * - * // sub-routers/groups - * api := r.Group("/api") - * api.GET("/admins", handler2) - * - * // generate a http.ServeMux instance based on the router configurations - * mux, _ := r.BuildMux() - * - * http.ListenAndServe("localhost:8090", mux) - * ``` - */ - type _subjGLEK = RouterGroup - interface Router extends _subjGLEK { - } - interface Router { - /** - * BuildMux constructs a new mux [http.Handler] instance from the current router configurations. - */ - buildMux(): http.Handler - } -} - /** - * Package core is the backbone of PocketBase. - * - * It defines the main PocketBase App interface and its base implementation. + * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer + * object, creating another object (Reader or Writer) that also implements + * the interface but provides buffering and some help for textual I/O. */ -namespace core { +namespace bufio { /** - * App defines the main PocketBase app interface. - * - * Note that the interface is not intended to be implemented manually by users - * and instead they should use core.BaseApp (either directly or as embedded field in a custom struct). - * - * This interface exists to make testing easier and to allow users to - * create common and pluggable helpers and methods that doesn't rely - * on a specific wrapped app struct (hence the large interface size). + * ReadWriter stores pointers to a [Reader] and a [Writer]. + * It implements [io.ReadWriter]. */ - interface App { - [key:string]: any; - /** - * UnsafeWithoutHooks returns a shallow copy of the current app WITHOUT any registered hooks. - * - * NB! Note that using the returned app instance may cause data integrity errors - * since the Record validations and data normalizations (including files uploads) - * rely on the app hooks to work. - */ - unsafeWithoutHooks(): App - /** - * Logger returns the default app logger. - * - * If the application is not bootstrapped yet, fallbacks to slog.Default(). - */ - logger(): (slog.Logger) - /** - * IsBootstrapped checks if the application was initialized - * (aka. whether Bootstrap() was called). - */ - isBootstrapped(): boolean - /** - * IsTransactional checks if the current app instance is part of a transaction. 
- */ - isTransactional(): boolean - /** - * Bootstrap initializes the application - * (aka. create data dir, open db connections, load settings, etc.). - * - * It will call ResetBootstrapState() if the application was already bootstrapped. - */ - bootstrap(): void - /** - * ResetBootstrapState releases the initialized core app resources - * (closing db connections, stopping cron ticker, etc.). - */ - resetBootstrapState(): void - /** - * DataDir returns the app data directory path. - */ - dataDir(): string - /** - * EncryptionEnv returns the name of the app secret env key - * (currently used primarily for optional settings encryption but this may change in the future). - */ - encryptionEnv(): string - /** - * IsDev returns whether the app is in dev mode. - * - * When enabled, logs, executed sql statements, etc. are printed to the stderr. - */ - isDev(): boolean - /** - * Settings returns the loaded app settings. - */ - settings(): (Settings) - /** - * Store returns the app runtime store. - */ - store(): (store.Store) - /** - * Cron returns the app cron instance. - */ - cron(): (cron.Cron) - /** - * SubscriptionsBroker returns the app realtime subscriptions broker instance. - */ - subscriptionsBroker(): (subscriptions.Broker) - /** - * NewMailClient creates and returns a new SMTP or Sendmail client - * based on the current app settings. - */ - newMailClient(): mailer.Mailer - /** - * NewFilesystem creates a new local or S3 filesystem instance - * for managing regular app files (ex. record uploads) - * based on the current app settings. - * - * NB! Make sure to call Close() on the returned result - * after you are done working with it. - */ - newFilesystem(): (filesystem.System) - /** - * NewBackupsFilesystem creates a new local or S3 filesystem instance - * for managing app backups based on the current app settings. - * - * NB! Make sure to call Close() on the returned result - * after you are done working with it. - */ - newBackupsFilesystem(): (filesystem.System) - /** - * ReloadSettings reinitializes and reloads the stored application settings. - */ - reloadSettings(): void - /** - * CreateBackup creates a new backup of the current app pb_data directory. - * - * Backups can be stored on S3 if it is configured in app.Settings().Backups. - * - * Please refer to the godoc of the specific CoreApp implementation - * for details on the backup procedures. - */ - createBackup(ctx: context.Context, name: string): void - /** - * RestoreBackup restores the backup with the specified name and restarts - * the current running application process. - * - * To safely perform the restore, it is recommended to have free disk space - * for at least 2x the size of the restored pb_data backup. - * - * Please refer to the godoc of the specific CoreApp implementation - * for details on the restore procedures. - * - * NB! This feature is experimental and currently is expected to work only on UNIX based systems. - */ - restoreBackup(ctx: context.Context, name: string): void - /** - * Restart restarts (aka. replaces) the current running application process. - * - * NB! It relies on execve which is supported only on UNIX based systems. - */ - restart(): void - /** - * RunSystemMigrations applies all new migrations registered in the [core.SystemMigrations] list. - */ - runSystemMigrations(): void - /** - * RunAppMigrations applies all new migrations registered in the [CoreAppMigrations] list. - */ - runAppMigrations(): void - /** - * RunAllMigrations applies all system and app migrations - * (aka.
from both [core.SystemMigrations] and [CoreAppMigrations]). - */ - runAllMigrations(): void - /** - * DB returns the default app data db instance (pb_data/data.db). - */ - db(): dbx.Builder - /** - * NonconcurrentDB returns the nonconcurrent app data db instance (pb_data/data.db). - * - * The returned db instance is limited only to a single open connection, - * meaning that it can process only 1 db operation at a time (other operations will be queued up). - * - * This method is used mainly internally and in the tests to execute write - * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors. - * - * For the majority of cases you would want to use the regular DB() method - * since it allows concurrent db read operations. - * - * In a transaction the ConcurrentDB() and NonconcurrentDB() refer to the same *dbx.TX instance. - */ - nonconcurrentDB(): dbx.Builder - /** - * AuxDB returns the default app auxiliary db instance (pb_data/aux.db). - */ - auxDB(): dbx.Builder - /** - * AuxNonconcurrentDB returns the nonconcurrent app auxiliary db instance (pb_data/aux.db). - * - * The returned db instance is limited only to a single open connection, - * meaning that it can process only 1 db operation at a time (other operations will be queued up). - * - * This method is used mainly internally and in the tests to execute write - * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors. - * - * For the majority of cases you would want to use the regular DB() method - * since it allows concurrent db read operations. - * - * In a transaction the AuxDB() and AuxNonconcurrentDB() refer to the same *dbx.TX instance. - */ - auxNonconcurrentDB(): dbx.Builder - /** - * HasTable checks if a table (or view) with the provided name exists (case insensitive). - */ - hasTable(tableName: string): boolean - /** - * TableColumns returns all column names of a single table by its name. - */ - tableColumns(tableName: string): Array - /** - * TableInfo returns the "table_info" pragma result for the specified table. - */ - tableInfo(tableName: string): Array<(TableInfoRow | undefined)> - /** - * TableIndexes returns a name grouped map with all non-empty indexes of the specified table. - * - * Note: This method doesn't return an error for a nonexistent table. - */ - tableIndexes(tableName: string): _TygojaDict - /** - * DeleteTable drops the specified table. - * - * This method is a no-op if a table with the provided name doesn't exist. - * - * NB! Be aware that this method is vulnerable to SQL injection and the - * "tableName" argument must come only from trusted input! - */ - deleteTable(tableName: string): void - /** - * DeleteView drops the view with the specified name. - * - * This method is a no-op if a view with the provided name doesn't exist. - * - * NB! Be aware that this method is vulnerable to SQL injection and the - * "name" argument must come only from trusted input! - */ - deleteView(name: string): void - /** - * SaveView creates (or updates an already existing) persistent SQL view. - * - * NB! Be aware that this method is vulnerable to SQL injection and the - * "selectQuery" argument must come only from trusted input! - */ - saveView(name: string, selectQuery: string): void - /** - * CreateViewFields creates a new FieldsList from the provided select query. - * - * There are some caveats: - * - The select query must have an "id" column. - * - Wildcard ("*") columns are not supported to avoid accidentally leaking sensitive data.
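As a quick illustration of the view helpers above (a sketch only, assuming the JSVM exposes the app instance as the global `$app`): the view name and select query are hard-coded literals, never built from user input, because SaveView/DeleteView/DeleteTable are vulnerable to SQL injection.

```
// create (or update) a persistent SQL view from a trusted, hard-coded query
$app.saveView("active_posts", "SELECT posts.id, posts.title FROM posts WHERE posts.active = 1");

// derive a FieldsList for it – the select must expose an "id" column
const fields = $app.createViewFields("SELECT posts.id, posts.title FROM posts WHERE posts.active = 1");

// drop it again when no longer needed
$app.deleteView("active_posts");
```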
- */ - createViewFields(selectQuery: string): FieldsList - /** - * FindRecordByViewFile returns the original Record of the provided view collection file. - */ - findRecordByViewFile(viewCollectionModelOrIdentifier: any, fileFieldName: string, filename: string): (Record) - /** - * Vacuum executes VACUUM on the current app.DB() instance - * in order to reclaim unused data db disk space. - */ - vacuum(): void - /** - * AuxVacuum executes VACUUM on the current app.AuxDB() instance - * in order to reclaim unused auxiliary db disk space. - */ - auxVacuum(): void - /** - * ModelQuery creates a new preconfigured select app.DB() query with preset - * SELECT, FROM and other common fields based on the provided model. - */ - modelQuery(model: Model): (dbx.SelectQuery) - /** - * AuxModelQuery creates a new preconfigured select app.AuxDB() query with preset - * SELECT, FROM and other common fields based on the provided model. - */ - auxModelQuery(model: Model): (dbx.SelectQuery) - /** - * Delete deletes the specified model from the regular app database. - */ - delete(model: Model): void - /** - * Delete deletes the specified model from the regular app database - * (the context could be used to limit the query execution). - */ - deleteWithContext(ctx: context.Context, model: Model): void - /** - * AuxDelete deletes the specified model from the auxiliary database. - */ - auxDelete(model: Model): void - /** - * AuxDeleteWithContext deletes the specified model from the auxiliary database - * (the context could be used to limit the query execution). - */ - auxDeleteWithContext(ctx: context.Context, model: Model): void - /** - * Save validates and saves the specified model into the regular app database. - * - * If you don't want to run validations, use [App.SaveNoValidate()]. - */ - save(model: Model): void - /** - * SaveWithContext is the same as [App.Save()] but allows specifying a context to limit the db execution. - * - * If you don't want to run validations, use [App.SaveNoValidateWithContext()]. - */ - saveWithContext(ctx: context.Context, model: Model): void - /** - * SaveNoValidate saves the specified model into the regular app database without performing validations. - * - * If you want to also run validations before persisting, use [App.Save()]. - */ - saveNoValidate(model: Model): void - /** - * SaveNoValidateWithContext is the same as [App.SaveNoValidate()] - * but allows specifying a context to limit the db execution. - * - * If you want to also run validations before persisting, use [App.SaveWithContext()]. - */ - saveNoValidateWithContext(ctx: context.Context, model: Model): void - /** - * AuxSave validates and saves the specified model into the auxiliary app database. - * - * If you don't want to run validations, use [App.AuxSaveNoValidate()]. - */ - auxSave(model: Model): void - /** - * AuxSaveWithContext is the same as [App.AuxSave()] but allows specifying a context to limit the db execution. - * - * If you don't want to run validations, use [App.AuxSaveNoValidateWithContext()]. - */ - auxSaveWithContext(ctx: context.Context, model: Model): void - /** - * AuxSaveNoValidate saves the specified model into the auxiliary app database without performing validations. - * - * If you want to also run validations before persisting, use [App.AuxSave()]. - */ - auxSaveNoValidate(model: Model): void - /** - * AuxSaveNoValidateWithContext is the same as [App.AuxSaveNoValidate()] - * but allows specifying a context to limit the db execution. 
- * - * If you want to also run validations before persisting, use [App.AuxSaveWithContext()]. - */ - auxSaveNoValidateWithContext(ctx: context.Context, model: Model): void - /** - * Validate triggers the OnModelValidate hook for the specified model. - */ - validate(model: Model): void - /** - * ValidateWithContext is the same as Validate but allows specifying the ModelEvent context. - */ - validateWithContext(ctx: context.Context, model: Model): void - /** - * RunInTransaction wraps fn into a transaction for the regular app database. - * - * It is safe to nest RunInTransaction calls as long as you use the callback's txApp. - */ - runInTransaction(fn: (txApp: App) => void): void - /** - * AuxRunInTransaction wraps fn into a transaction for the auxiliary app database. - * - * It is safe to nest RunInTransaction calls as long as you use the callback's txApp. - */ - auxRunInTransaction(fn: (txApp: App) => void): void - /** - * LogQuery returns a new Log select query. - */ - logQuery(): (dbx.SelectQuery) - /** - * FindLogById finds a single Log entry by its id. - */ - findLogById(id: string): (Log) - /** - * LogsStatsItem defines the total number of logs for a specific time period. - */ - logsStats(expr: dbx.Expression): Array<(LogsStatsItem | undefined)> - /** - * DeleteOldLogs delete all requests that are created before createdBefore. - */ - deleteOldLogs(createdBefore: time.Time): void - /** - * CollectionQuery returns a new Collection select query. - */ - collectionQuery(): (dbx.SelectQuery) - /** - * FindCollections finds all collections by the given type(s). - * - * If collectionTypes is not set, it returns all collections. - * - * Example: - * - * ``` - * app.FindAllCollections() // all collections - * app.FindAllCollections("auth", "view") // only auth and view collections - * ``` - */ - findAllCollections(...collectionTypes: string[]): Array<(Collection | undefined)> - /** - * ReloadCachedCollections fetches all collections and caches them into the app store. - */ - reloadCachedCollections(): void - /** - * FindCollectionByNameOrId finds a single collection by its name (case insensitive) or id.s - */ - findCollectionByNameOrId(nameOrId: string): (Collection) - /** - * FindCachedCollectionByNameOrId is similar to [App.FindCollectionByNameOrId] - * but retrieves the Collection from the app cache instead of making a db call. - * - * NB! This method is suitable for read-only Collection operations. - * - * Returns [sql.ErrNoRows] if no Collection is found for consistency - * with the [App.FindCollectionByNameOrId] method. - * - * If you plan making changes to the returned Collection model, - * use [App.FindCollectionByNameOrId] instead. - * - * Caveats: - * - * ``` - * - The returned Collection should be used only for read-only operations. - * Avoid directly modifying the returned cached Collection as it will affect - * the global cached value even if you don't persist the changes in the database! - * - If you are updating a Collection in a transaction and then call this method before commit, - * it'll return the cached Collection state and not the one from the uncommited transaction. - * - The cache is automatically updated on collections db change (create/update/delete). - * To manually reload the cache you can call [App.ReloadCachedCollections()] - * ``` - */ - findCachedCollectionByNameOrId(nameOrId: string): (Collection) - /** - * IsCollectionNameUnique checks that there is no existing collection - * with the provided name (case insensitive!). 
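The save/transaction methods above can be combined as in the following sketch (assuming the `$app` global and that the Record model exposes a `set()` helper, as suggested by the OnRecordEnrich example further below); nested operations must go through the callback's `txApp`, not the outer `$app`:

```
$app.runInTransaction((txApp) => {
    // hypothetical collection/record identifiers, for illustration only
    const record = txApp.findRecordById("posts", "RECORD_ID");
    record.set("title", "updated title");
    txApp.save(record); // validates and persists within the wrapping transaction
});
```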
- * - * Note: case insensitive check because the name is used also as - * table name for the records. - */ - isCollectionNameUnique(name: string, ...excludeIds: string[]): boolean - /** - * FindCollectionReferences returns information for all relation - * fields referencing the provided collection. - * - * If the provided collection has reference to itself then it will be - * also included in the result. To exclude it, pass the collection id - * as the excludeIds argument. - */ - findCollectionReferences(collection: Collection, ...excludeIds: string[]): _TygojaDict - /** - * TruncateCollection deletes all records associated with the provided collection. - * - * The truncate operation is executed in a single transaction, - * aka. either everything is deleted or none. - * - * Note that this method will also trigger the records related - * cascade and file delete actions. - */ - truncateCollection(collection: Collection): void - /** - * ImportCollections imports the provided collections data in a single transaction. - * - * For existing matching collections, the imported data is unmarshaled on top of the existing model. - * - * NB! If deleteMissing is true, ALL NON-SYSTEM COLLECTIONS AND SCHEMA FIELDS, - * that are not present in the imported configuration, WILL BE DELETED - * (this includes their related records data). - */ - importCollections(toImport: Array<_TygojaDict>, deleteMissing: boolean): void - /** - * ImportCollectionsByMarshaledJSON is the same as [ImportCollections] - * but accept marshaled json array as import data (usually used for the autogenerated snapshots). - */ - importCollectionsByMarshaledJSON(rawSliceOfMaps: string|Array, deleteMissing: boolean): void - /** - * SyncRecordTableSchema compares the two provided collections - * and applies the necessary related record table changes. - * - * If oldCollection is null, then only newCollection is used to create the record table. - * - * This method is automatically invoked as part of a collection create/update/delete operation. - */ - syncRecordTableSchema(newCollection: Collection, oldCollection: Collection): void - /** - * FindAllExternalAuthsByRecord returns all ExternalAuth models - * linked to the provided auth record. - */ - findAllExternalAuthsByRecord(authRecord: Record): Array<(ExternalAuth | undefined)> - /** - * FindAllExternalAuthsByCollection returns all ExternalAuth models - * linked to the provided auth collection. - */ - findAllExternalAuthsByCollection(collection: Collection): Array<(ExternalAuth | undefined)> - /** - * FindFirstExternalAuthByExpr returns the first available (the most recent created) - * ExternalAuth model that satisfies the non-nil expression. - */ - findFirstExternalAuthByExpr(expr: dbx.Expression): (ExternalAuth) - /** - * FindAllMFAsByRecord returns all MFA models linked to the provided auth record. - */ - findAllMFAsByRecord(authRecord: Record): Array<(MFA | undefined)> - /** - * FindAllMFAsByCollection returns all MFA models linked to the provided collection. - */ - findAllMFAsByCollection(collection: Collection): Array<(MFA | undefined)> - /** - * FindMFAById retuns a single MFA model by its id. - */ - findMFAById(id: string): (MFA) - /** - * DeleteAllMFAsByRecord deletes all MFA models associated with the provided record. - * - * Returns a combined error with the failed deletes. - */ - deleteAllMFAsByRecord(authRecord: Record): void - /** - * DeleteExpiredMFAs deletes the expired MFAs for all auth collections. 
- */ - deleteExpiredMFAs(): void - /** - * FindAllOTPsByRecord returns all OTP models linked to the provided auth record. - */ - findAllOTPsByRecord(authRecord: Record): Array<(OTP | undefined)> - /** - * FindAllOTPsByCollection returns all OTP models linked to the provided collection. - */ - findAllOTPsByCollection(collection: Collection): Array<(OTP | undefined)> - /** - * FindOTPById retuns a single OTP model by its id. - */ - findOTPById(id: string): (OTP) - /** - * DeleteAllOTPsByRecord deletes all OTP models associated with the provided record. - * - * Returns a combined error with the failed deletes. - */ - deleteAllOTPsByRecord(authRecord: Record): void - /** - * DeleteExpiredOTPs deletes the expired OTPs for all auth collections. - */ - deleteExpiredOTPs(): void - /** - * FindAllAuthOriginsByRecord returns all AuthOrigin models linked to the provided auth record (in DESC order). - */ - findAllAuthOriginsByRecord(authRecord: Record): Array<(AuthOrigin | undefined)> - /** - * FindAllAuthOriginsByCollection returns all AuthOrigin models linked to the provided collection (in DESC order). - */ - findAllAuthOriginsByCollection(collection: Collection): Array<(AuthOrigin | undefined)> - /** - * FindAuthOriginById returns a single AuthOrigin model by its id. - */ - findAuthOriginById(id: string): (AuthOrigin) - /** - * FindAuthOriginByRecordAndFingerprint returns a single AuthOrigin model - * by its authRecord relation and fingerprint. - */ - findAuthOriginByRecordAndFingerprint(authRecord: Record, fingerprint: string): (AuthOrigin) - /** - * DeleteAllAuthOriginsByRecord deletes all AuthOrigin models associated with the provided record. - * - * Returns a combined error with the failed deletes. - */ - deleteAllAuthOriginsByRecord(authRecord: Record): void - /** - * RecordQuery returns a new Record select query from a collection model, id or name. - * - * In case a collection id or name is provided and that collection doesn't - * actually exists, the generated query will be created with a cancelled context - * and will fail once an executor (Row(), One(), All(), etc.) is called. - */ - recordQuery(collectionModelOrIdentifier: any): (dbx.SelectQuery) - /** - * FindRecordById finds the Record model by its id. - */ - findRecordById(collectionModelOrIdentifier: any, recordId: string, ...optFilters: ((q: dbx.SelectQuery) => void)[]): (Record) - /** - * FindRecordsByIds finds all records by the specified ids. - * If no records are found, returns an empty slice. - */ - findRecordsByIds(collectionModelOrIdentifier: any, recordIds: Array, ...optFilters: ((q: dbx.SelectQuery) => void)[]): Array<(Record | undefined)> - /** - * FindAllRecords finds all records matching specified db expressions. - * - * Returns all collection records if no expression is provided. - * - * Returns an empty slice if no records are found. - * - * Example: - * - * ``` - * // no extra expressions - * app.FindAllRecords("example") - * - * // with extra expressions - * expr1 := dbx.HashExp{"email": "test@example.com"} - * expr2 := dbx.NewExp("LOWER(username) = {:username}", dbx.Params{"username": "test"}) - * app.FindAllRecords("example", expr1, expr2) - * ``` - */ - findAllRecords(collectionModelOrIdentifier: any, ...exprs: dbx.Expression[]): Array<(Record | undefined)> - /** - * FindFirstRecordByData returns the first found record matching - * the provided key-value pair. 
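A small sketch of the record finders described in this part of the interface (again assuming the `$app` global; the collection and record identifiers are placeholders):

```
const post = $app.findRecordById("posts", "RECORD_ID");
const user = $app.findFirstRecordByData("users", "email", "test@example.com");
const total = $app.countRecords("posts");
```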
- */ - findFirstRecordByData(collectionModelOrIdentifier: any, key: string, value: any): (Record) - /** - * FindRecordsByFilter returns limit number of records matching the - * provided string filter. - * - * NB! Use the last "params" argument to bind untrusted user variables! - * - * The filter argument is optional and can be empty string to target - * all available records. - * - * The sort argument is optional and can be empty string OR the same format - * used in the web APIs, ex. "-created,title". - * - * If the limit argument is <= 0, no limit is applied to the query and - * all matching records are returned. - * - * Returns an empty slice if no records are found. - * - * Example: - * - * ``` - * app.FindRecordsByFilter( - * "posts", - * "title ~ {:title} && visible = {:visible}", - * "-created", - * 10, - * 0, - * dbx.Params{"title": "lorem ipsum", "visible": true} - * ) - * ``` - */ - findRecordsByFilter(collectionModelOrIdentifier: any, filter: string, sort: string, limit: number, offset: number, ...params: dbx.Params[]): Array<(Record | undefined)> - /** - * FindFirstRecordByFilter returns the first available record matching the provided filter (if any). - * - * NB! Use the last params argument to bind untrusted user variables! - * - * Returns sql.ErrNoRows if no record is found. - * - * Example: - * - * ``` - * app.FindFirstRecordByFilter("posts", "") - * app.FindFirstRecordByFilter("posts", "slug={:slug} && status='public'", dbx.Params{"slug": "test"}) - * ``` - */ - findFirstRecordByFilter(collectionModelOrIdentifier: any, filter: string, ...params: dbx.Params[]): (Record) - /** - * CountRecords returns the total number of records in a collection. - */ - countRecords(collectionModelOrIdentifier: any, ...exprs: dbx.Expression[]): number - /** - * FindAuthRecordByToken finds the auth record associated with the provided JWT - * (auth, file, verifyEmail, changeEmail, passwordReset types). - * - * Optionally specify a list of validTypes to check tokens only from those types. - * - * Returns an error if the JWT is invalid, expired or not associated to an auth collection record. - */ - findAuthRecordByToken(token: string, ...validTypes: string[]): (Record) - /** - * FindAuthRecordByEmail finds the auth record associated with the provided email. - * - * Returns an error if it is not an auth collection or the record is not found. - */ - findAuthRecordByEmail(collectionModelOrIdentifier: any, email: string): (Record) - /** - * CanAccessRecord checks if a record is allowed to be accessed by the - * specified requestInfo and accessRule. - * - * Rule and db checks are ignored in case requestInfo.AuthRecord is a superuser. - * - * The returned error indicate that something unexpected happened during - * the check (eg. invalid rule or db query error). - * - * The method always return false on invalid rule or db query error. - * - * Example: - * - * ``` - * requestInfo, _ := e.RequestInfo() - * record, _ := app.FindRecordById("example", "RECORD_ID") - * rule := types.Pointer("@request.auth.id != '' || status = 'public'") - * // ... or use one of the record collection's rule, eg. record.Collection().ViewRule - * - * if ok, _ := app.CanAccessRecord(record, requestInfo, rule); ok { ... } - * ``` - */ - canAccessRecord(record: Record, requestInfo: RequestInfo, accessRule: string): boolean - /** - * ExpandRecord expands the relations of a single Record model. - * - * If optFetchFunc is not set, then a default function will be used - * that returns all relation records. 
- * - * Returns a map with the failed expand parameters and their errors. - */ - expandRecord(record: Record, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict - /** - * ExpandRecords expands the relations of the provided Record models list. - * - * If optFetchFunc is not set, then a default function will be used - * that returns all relation records. - * - * Returns a map with the failed expand parameters and their errors. - */ - expandRecords(records: Array<(Record | undefined)>, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict - /** - * OnBootstrap hook is triggered on initializing the main application - * resources (db, app settings, etc). - */ - onBootstrap(): (hook.Hook) - /** - * OnServe hook is triggered on when the app web server is started - * (after starting the tcp listener but before initializing the blocking serve task), - * allowing you to adjust its options and attach new routes or middlewares. - */ - onServe(): (hook.Hook) - /** - * OnTerminate hook is triggered when the app is in the process - * of being terminated (ex. on SIGTERM signal). - */ - onTerminate(): (hook.Hook) - /** - * OnBackupCreate hook is triggered on each [App.CreateBackup] call. - */ - onBackupCreate(): (hook.Hook) - /** - * OnBackupRestore hook is triggered before app backup restore (aka. [App.RestoreBackup] call). - * - * Note that by default on success the application is restarted and the after state of the hook is ignored. - */ - onBackupRestore(): (hook.Hook) - /** - * OnModelValidate is triggered every time when a model is being validated - * (e.g. triggered by App.Validate() or App.Save()). - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelValidate(...tags: string[]): (hook.TaggedHook) - /** - * OnModelCreate is triggered every time when a new model is being created - * (e.g. triggered by App.Save()). - * - * Operations BEFORE the e.Next() execute before the model validation - * and the INSERT DB statement. - * - * Operations AFTER the e.Next() execute after the model validation - * and the INSERT DB statement. - * - * Note that succesful execution doesn't guarantee that the model - * is persisted in the database since its wrapping transaction may - * not have been committed yet. - * If you wan to listen to only the actual persisted events, you can - * bind to [OnModelAfterCreateSuccess] or [OnModelAfterCreateError] hooks. - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelCreate(...tags: string[]): (hook.TaggedHook) - /** - * OnModelCreateExecute is triggered after successful Model validation - * and right before the model INSERT DB statement execution. 
- * - * Usually it is triggered as part of the App.Save() in the following firing order: - * OnModelCreate { - * ``` - * -> OnModelValidate (skipped with App.SaveNoValidate()) - * -> OnModelCreateExecute - * ``` - * } - * - * Note that successful execution doesn't guarantee that the model - * is persisted in the database since its wrapping transaction may not have been - * committed yet. - * If you want to listen to only the actual persisted events, - * you can bind to [OnModelAfterCreateSuccess] or [OnModelAfterCreateError] hooks. - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelCreateExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnModelAfterCreateSuccess is triggered after each successful - * Model DB create persistence. - * - * Note that when a Model is persisted as part of a transaction, - * this hook is triggered AFTER the transaction has been committed. - * This hook is NOT triggered in case the transaction rolls back - * (aka. when the model wasn't persisted). - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelAfterCreateSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnModelAfterCreateError is triggered after each failed - * Model DB create persistence. - * Note that when a Model is persisted as part of a transaction, - * this hook is triggered in one of the following cases: - * ``` - * - immediately after App.Save() failure - * - on transaction rollback - * ``` - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelAfterCreateError(...tags: string[]): (hook.TaggedHook) - /** - * OnModelUpdate is triggered every time a model is being updated - * (e.g. triggered by App.Save()). - * - * Operations BEFORE the e.Next() execute before the model validation - * and the UPDATE DB statement. - * - * Operations AFTER the e.Next() execute after the model validation - * and the UPDATE DB statement. - * - * Note that successful execution doesn't guarantee that the model - * is persisted in the database since its wrapping transaction may - * not have been committed yet. - * If you want to listen to only the actual persisted events, you can - * bind to [OnModelAfterUpdateSuccess] or [OnModelAfterUpdateError] hooks. - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.)
is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelUpdate(...tags: string[]): (hook.TaggedHook) - /** - * OnModelUpdateExecute is triggered after successful Model validation - * and right before the model UPDATE DB statement execution. - * - * Usually it is triggered as part of the App.Save() in the following firing order: - * OnModelUpdate { - * ``` - * -> OnModelValidate (skipped with App.SaveNoValidate()) - * -> OnModelUpdateExecute - * ``` - * } - * - * Note that successful execution doesn't guarantee that the model - * is persisted in the database since its wrapping transaction may not have been - * committed yet. - * If you want to listen to only the actual persisted events, - * you can bind to [OnModelAfterUpdateSuccess] or [OnModelAfterUpdateError] hooks. - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelUpdateExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnModelAfterUpdateSuccess is triggered after each successful - * Model DB update persistence. - * - * Note that when a Model is persisted as part of a transaction, - * this hook is triggered AFTER the transaction has been committed. - * This hook is NOT triggered in case the transaction rolls back - * (aka. when the model changes weren't persisted). - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnModelAfterUpdateError is triggered after each failed - * Model DB update persistence. - * - * Note that when a Model is persisted as part of a transaction, - * this hook is triggered in one of the following cases: - * ``` - * - immediately after App.Save() failure - * - on transaction rollback - * ``` - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelAfterUpdateError(...tags: string[]): (hook.TaggedHook) - /** - * OnModelDelete is triggered every time a model is being deleted - * (e.g. triggered by App.Delete()). - * - * Note that successful execution doesn't guarantee that the model - * is deleted from the database since its wrapping transaction may - * not have been committed yet. - * If you want to listen to only the actual persisted deleted events, you can - * bind to [OnModelAfterDeleteSuccess] or [OnModelAfterDeleteError] hooks.
- * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelDelete(...tags: string[]): (hook.TaggedHook) - /** - * OnModelDeleteExecute is triggered right before the model - * DELETE DB statement execution. - * - * Usually it is triggered as part of the App.Delete() in the following firing order: - * OnModelDelete { - * ``` - * -> (internal delete checks) - * -> OnModelDeleteExecute - * ``` - * } - * - * Note that successful execution doesn't guarantee that the model - * is deleted from the database since its wrapping transaction may - * not have been committed yet. - * If you want to listen to only the actual persisted deleted events, you can - * bind to [OnModelAfterDeleteSuccess] or [OnModelAfterDeleteError] hooks. - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelDeleteExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnModelAfterDeleteSuccess is triggered after each successful - * Model DB delete persistence. - * - * Note that when a Model is deleted as part of a transaction, - * this hook is triggered AFTER the transaction has been committed. - * This hook is NOT triggered in case the transaction rolls back - * (aka. when the model delete wasn't persisted). - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnModelAfterDeleteError is triggered after each failed - * Model DB delete persistence. - * - * Note that when a Model is deleted as part of a transaction, - * this hook is triggered in one of the following cases: - * ``` - * - immediately after App.Delete() failure - * - on transaction rollback - * ``` - * - * For convenience, if you want to listen to only the Record models - * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks. - * - * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onModelAfterDeleteError(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordEnrich is triggered every time a record is enriched - * (during realtime message serialization, as part of the builtin Record - * responses, or when [apis.EnrichRecord] is invoked). - * - * It could be used for example to redact/hide or add computed temp - * Record model props only for the specific request info.
For example: - * - * app.OnRecordEnrich("posts").BindFunc(func(e core.*RecordEnrichEvent) { - * ``` - * // hide one or more fields - * e.Record.Hide("role") - * - * // add new custom field for registered users - * if e.RequestInfo.Auth != nil && e.RequestInfo.Auth.Collection().Name == "users" { - * e.Record.WithCustomData(true) // for security requires explicitly allowing it - * e.Record.Set("computedScore", e.Record.GetInt("score") * e.RequestInfo.Auth.GetInt("baseScore")) - * } - * - * return e.Next() - * ``` - * }) - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordEnrich(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordValidate is a proxy Record model hook for [OnModelValidate]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordValidate(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordCreate is a proxy Record model hook for [OnModelCreate]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordCreate(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordCreateExecute is a proxy Record model hook for [OnModelCreateExecute]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordCreateExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAfterCreateSuccess is a proxy Record model hook for [OnModelAfterCreateSuccess]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAfterCreateSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAfterCreateError is a proxy Record model hook for [OnModelAfterCreateError]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAfterCreateError(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordUpdate is a proxy Record model hook for [OnModelUpdate]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordUpdate(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordUpdateExecute is a proxy Record model hook for [OnModelUpdateExecute]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordUpdateExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAfterUpdateSuccess is a proxy Record model hook for [OnModelAfterUpdateSuccess]. 
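The proxy Record hooks listed here follow the same registration pattern as the Go example above; a hedged JSVM-style sketch (assuming `$app`, that the returned tagged hook exposes a `bindFunc()` method mirroring the Go `BindFunc`, and that the event carries `record` and `next()`):

```
$app.onRecordCreate("posts").bindFunc((e) => {
    // runs only for "posts" records because of the collection tag
    e.record.set("status", "pending");
    return e.next();
});
```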
- * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAfterUpdateError is a proxy Record model hook for [OnModelAfterUpdateError]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAfterUpdateError(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordDelete is a proxy Record model hook for [OnModelDelete]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordDelete(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordDeleteExecute is a proxy Record model hook for [OnModelDeleteExecute]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordDeleteExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAfterDeleteSuccess is a proxy Record model hook for [OnModelAfterDeleteSuccess]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAfterDeleteError is a proxy Record model hook for [OnModelAfterDeleteError]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAfterDeleteError(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionValidate is a proxy Collection model hook for [OnModelValidate]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionValidate(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionCreate is a proxy Collection model hook for [OnModelCreate]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionCreate(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionCreateExecute is a proxy Collection model hook for [OnModelCreateExecute]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionCreateExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionAfterCreateSuccess is a proxy Collection model hook for [OnModelAfterCreateSuccess]. 
- * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionAfterCreateSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionAfterCreateError is a proxy Collection model hook for [OnModelAfterCreateError]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionAfterCreateError(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionUpdate is a proxy Collection model hook for [OnModelUpdate]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionUpdate(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionUpdateExecute is a proxy Collection model hook for [OnModelUpdateExecute]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionUpdateExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionAfterUpdateSuccess is a proxy Collection model hook for [OnModelAfterUpdateSuccess]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionAfterUpdateError is a proxy Collection model hook for [OnModelAfterUpdateError]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionAfterUpdateError(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionDelete is a proxy Collection model hook for [OnModelDelete]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionDelete(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionDeleteExecute is a proxy Collection model hook for [OnModelDeleteExecute]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionDeleteExecute(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionAfterDeleteSuccess is a proxy Collection model hook for [OnModelAfterDeleteSuccess]. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionAfterDeleteError is a proxy Collection model hook for [OnModelAfterDeleteError]. 
- * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onCollectionAfterDeleteError(...tags: string[]): (hook.TaggedHook) - /** - * OnMailerSend hook is triggered every time when a new email is - * being send using the App.NewMailClient() instance. - * - * It allows intercepting the email message or to use a custom mailer client. - */ - onMailerSend(): (hook.Hook) - /** - * OnMailerRecordAuthAlertSend hook is triggered when - * sending a new device login auth alert email, allowing you to - * intercept and customize the email message that is being sent. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onMailerRecordAuthAlertSend(...tags: string[]): (hook.TaggedHook) - /** - * OnMailerBeforeRecordResetPasswordSend hook is triggered when - * sending a password reset email to an auth record, allowing - * you to intercept and customize the email message that is being sent. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onMailerRecordPasswordResetSend(...tags: string[]): (hook.TaggedHook) - /** - * OnMailerBeforeRecordVerificationSend hook is triggered when - * sending a verification email to an auth record, allowing - * you to intercept and customize the email message that is being sent. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onMailerRecordVerificationSend(...tags: string[]): (hook.TaggedHook) - /** - * OnMailerRecordEmailChangeSend hook is triggered when sending a - * confirmation new address email to an auth record, allowing - * you to intercept and customize the email message that is being sent. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onMailerRecordEmailChangeSend(...tags: string[]): (hook.TaggedHook) - /** - * OnMailerRecordOTPSend hook is triggered when sending an OTP email - * to an auth record, allowing you to intercept and customize the - * email message that is being sent. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onMailerRecordOTPSend(...tags: string[]): (hook.TaggedHook) - /** - * OnRealtimeConnectRequest hook is triggered when establishing the SSE client connection. - * - * Any execution after [e.Next()] of a hook handler happens after the client disconnects. - */ - onRealtimeConnectRequest(): (hook.Hook) - /** - * OnRealtimeMessageSend hook is triggered when sending an SSE message to a client. - */ - onRealtimeMessageSend(): (hook.Hook) - /** - * OnRealtimeSubscribeRequest hook is triggered when updating the - * client subscriptions, allowing you to further validate and - * modify the submitted change. 
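For the mailer hooks above, the binding flow is the same; a minimal sketch (assuming `$app` and a `bindFunc()` method on the returned hook; the mailer event fields are not shown in this excerpt, so only the generic interception flow is illustrated):

```
$app.onMailerRecordPasswordResetSend("users").bindFunc((e) => {
    // inspect or customize the outgoing message here before it is sent
    return e.next();
});
```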
- */ - onRealtimeSubscribeRequest(): (hook.Hook) - /** - * OnSettingsListRequest hook is triggered on each API Settings list request. - * - * Could be used to validate or modify the response before returning it to the client. - */ - onSettingsListRequest(): (hook.Hook) - /** - * OnSettingsUpdateRequest hook is triggered on each API Settings update request. - * - * Could be used to additionally validate the request data or - * implement completely different persistence behavior. - */ - onSettingsUpdateRequest(): (hook.Hook) - /** - * OnSettingsReload hook is triggered every time when the App.Settings() - * is being replaced with a new state. - * - * Calling App.Settings() after e.Next() should return the new state. - */ - onSettingsReload(): (hook.Hook) - /** - * OnFileDownloadRequest hook is triggered before each API File download request. - * - * Could be used to validate or modify the file response before - * returning it to the client. - */ - onFileDownloadRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnFileBeforeTokenRequest hook is triggered on each file token API request. - */ - onFileTokenRequest(): (hook.Hook) - /** - * OnRecordAuthRequest hook is triggered on each successful API - * record authentication request (sign-in, token refresh, etc.). - * - * Could be used to additionally validate or modify the authenticated - * record data and token. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAuthRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAuthWithPasswordRequest hook is triggered on each - * Record auth with password API request. - * - * RecordAuthWithPasswordRequestEvent.Record could be nil if no - * matching identity is found, allowing you to manually locate a different - * Record model (by reassigning [RecordAuthWithPasswordRequestEvent.Record]). - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAuthWithPasswordRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAuthWithOAuth2Request hook is triggered on each Record - * OAuth2 sign-in/sign-up API request (after token exchange and before external provider linking). - * - * If the [RecordAuthWithOAuth2RequestEvent.Record] is not set, then the OAuth2 - * request will try to create a new auth Record. - * - * To assign or link a different existing record model you can - * change the [RecordAuthWithOAuth2RequestEvent.Record] field. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAuthWithOAuth2Request(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAuthRefreshRequest hook is triggered on each Record - * auth refresh API request (right before generating a new auth token). - * - * Could be used to additionally validate the request data or implement - * completely different auth refresh behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. 
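The tagged hook getters documented above all follow the same registration pattern, so a single sketch may help. This is a hypothetical example only: it assumes the getters are exposed on the `App` type referenced later in this file, and that the returned `hook.Hook`/`hook.TaggedHook` values expose a `bindFunc()` method whose handler receives an event with a `next()` method, mirroring the Go hook API (neither is declared in this excerpt).

```
// Hypothetical sketch; bindFunc() and e.next() are assumptions that mirror
// the Go hook API and are not part of the declarations shown here.
function registerHooks(app: App) {
  // Untagged hook: fires on every settings list request.
  app.onSettingsListRequest().bindFunc((e: any) => {
    console.log("settings listed");
    e.next();
  });

  // Tagged hook: fires only when the event data originates from the
  // "users" or "staff" collections (ids or names).
  app.onRecordAuthRequest("users", "staff").bindFunc((e: any) => {
    console.log("auth request");
    e.next();
  });
}
```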
- */ - onRecordAuthRefreshRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordRequestPasswordResetRequest hook is triggered on - * each Record request password reset API request. - * - * Could be used to additionally validate the request data or implement - * completely different password reset behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordRequestPasswordResetRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordConfirmPasswordResetRequest hook is triggered on - * each Record confirm password reset API request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordConfirmPasswordResetRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordRequestVerificationRequest hook is triggered on - * each Record request verification API request. - * - * Could be used to additionally validate the loaded request data or implement - * completely different verification behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordRequestVerificationRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordConfirmVerificationRequest hook is triggered on each - * Record confirm verification API request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordConfirmVerificationRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordRequestEmailChangeRequest hook is triggered on each - * Record request email change API request. - * - * Could be used to additionally validate the request data or implement - * completely different request email change behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordRequestEmailChangeRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordConfirmEmailChangeRequest hook is triggered on each - * Record confirm email change API request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordConfirmEmailChangeRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordRequestOTPRequest hook is triggered on each Record - * request OTP API request. 
- * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordRequestOTPRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordAuthWithOTPRequest hook is triggered on each Record - * auth with OTP API request. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordAuthWithOTPRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordsListRequest hook is triggered on each API Records list request. - * - * Could be used to validate or modify the response before returning it to the client. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordsListRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordViewRequest hook is triggered on each API Record view request. - * - * Could be used to validate or modify the response before returning it to the client. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordViewRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordCreateRequest hook is triggered on each API Record create request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordCreateRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordUpdateRequest hook is triggered on each API Record update request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordUpdateRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnRecordDeleteRequest hook is triggered on each API Record delete request. - * - * Could be used to additionally validate the request data or implement - * completely different delete behavior. - * - * If the optional "tags" list (Collection ids or names) is specified, - * then all event handlers registered via the created hook will be - * triggered and called only if their event data origin matches the tags. - */ - onRecordDeleteRequest(...tags: string[]): (hook.TaggedHook) - /** - * OnCollectionsListRequest hook is triggered on each API Collections list request. - * - * Could be used to validate or modify the response before returning it to the client. - */ - onCollectionsListRequest(): (hook.Hook) - /** - * OnCollectionViewRequest hook is triggered on each API Collection view request. - * - * Could be used to validate or modify the response before returning it to the client. 
- */ - onCollectionViewRequest(): (hook.Hook) - /** - * OnCollectionCreateRequest hook is triggered on each API Collection create request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - */ - onCollectionCreateRequest(): (hook.Hook) - /** - * OnCollectionUpdateRequest hook is triggered on each API Collection update request. - * - * Could be used to additionally validate the request data or implement - * completely different persistence behavior. - */ - onCollectionUpdateRequest(): (hook.Hook) - /** - * OnCollectionDeleteRequest hook is triggered on each API Collection delete request. - * - * Could be used to additionally validate the request data or implement - * completely different delete behavior. - */ - onCollectionDeleteRequest(): (hook.Hook) - /** - * OnCollectionsBeforeImportRequest hook is triggered on each API - * collections import request. - * - * Could be used to additionally validate the imported collections or - * to implement completely different import behavior. - */ - onCollectionsImportRequest(): (hook.Hook) - /** - * OnBatchRequest hook is triggered on each API batch request. - * - * Could be used to additionally validate or modify the submitted batch requests. - */ - onBatchRequest(): (hook.Hook) - } - // @ts-ignore - import validation = ozzo_validation - /** - * DBConnectFunc defines a database connection initialization function. - */ - interface DBConnectFunc {(dbPath: string): (dbx.DB) } - /** - * RequestEvent defines the PocketBase router handler event. - */ - type _subwWwMU = router.Event - interface RequestEvent extends _subwWwMU { - app: App - auth?: Record - } - interface RequestEvent { - /** - * RealIP returns the "real" IP address from the configured trusted proxy headers. - * - * If Settings.TrustedProxy is not configured or the found IP is empty, - * it fallbacks to e.RemoteIP(). - * - * NB! - * Be careful when used in a security critical context as it relies on - * the trusted proxy to be properly configured and your app to be accessible only through it. - * If you are not sure, use e.RemoteIP(). - */ - realIP(): string - } - interface RequestEvent { - /** - * HasSuperuserAuth checks whether the current RequestEvent has superuser authentication loaded. - */ - hasSuperuserAuth(): boolean - } - interface RequestEvent { - /** - * RequestInfo parses the current request into RequestInfo instance. - * - * Note that the returned result is cached to avoid copying the request data multiple times - * but the auth state and other common store items are always refreshed in case they were changed my another handler. - */ - requestInfo(): (RequestInfo) - } - interface InternalRequest { - /** - * note: for uploading files the value must be either *filesystem.File or []*filesystem.File - */ - body: _TygojaDict - headers: _TygojaDict - method: string - url: string - } - interface InternalRequest { - validate(): void - } - type _subxDBbH = BaseModel - interface Record extends _subxDBbH { - } - interface Record { - /** - * Collection returns the Collection model associated with the current Record model. - * - * NB! The returned collection is only for read purposes and it shouldn't be modified - * because it could have unintended side-effects on other Record models from the same collection. - */ - collection(): (Collection) - } - interface Record { - /** - * TableName returns the table name associated with the current Record model. 
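A short usage sketch of the RequestEvent helpers above (realIP, hasSuperuserAuth, requestInfo), assuming `e` is the event passed to some request hook handler (the registration itself is not shown):

```
// Minimal sketch; only the RequestEvent methods declared above are used.
function logRequest(e: RequestEvent) {
  // realIP() falls back to the plain remote IP when no trusted proxy
  // headers are configured.
  const ip = e.realIP();

  if (e.hasSuperuserAuth()) {
    console.log("superuser request from", ip);
  }

  // Cached parse of the request data; the auth state is refreshed on each call.
  const info = e.requestInfo();
  return info;
}
```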
- */ - tableName(): string - } - interface Record { - /** - * PostScan implements the [dbx.PostScanner] interface. - * - * It essentially refreshes/updates the current Record original state - * as if the model was fetched from the databases for the first time. - * - * Or in other words, it means that m.Original().FieldsData() will have - * the same values as m.Record().FieldsData(). - */ - postScan(): void - } - interface Record { - /** - * HookTags returns the hook tags associated with the current record. - */ - hookTags(): Array - } - interface Record { - /** - * BaseFilesPath returns the storage dir path used by the record. - */ - baseFilesPath(): string - } - interface Record { - /** - * Original returns a shallow copy of the current record model populated - * with its ORIGINAL db data state (aka. right after PostScan()) - * and everything else reset to the defaults. - * - * If record was created using NewRecord() the original will be always - * a blank record (until PostScan() is invoked). - */ - original(): (Record) - } - interface Record { - /** - * Fresh returns a shallow copy of the current record model populated - * with its LATEST data state and everything else reset to the defaults - * (aka. no expand, no unknown fields and with default visibility flags). - */ - fresh(): (Record) - } - interface Record { - /** - * Clone returns a shallow copy of the current record model with all of - * its collection and unknown fields data, expand and flags copied. - * - * use [Record.Fresh()] instead if you want a copy with only the latest - * collection fields data and everything else reset to the defaults. - */ - clone(): (Record) - } - interface Record { - /** - * Expand returns a shallow copy of the current Record model expand data (if any). - */ - expand(): _TygojaDict - } - interface Record { - /** - * SetExpand replaces the current Record's expand with the provided expand arg data (shallow copied). - */ - setExpand(expand: _TygojaDict): void - } - interface Record { - /** - * MergeExpand merges recursively the provided expand data into - * the current model's expand (if any). - * - * Note that if an expanded prop with the same key is a slice (old or new expand) - * then both old and new records will be merged into a new slice (aka. a :merge: [b,c] => [a,b,c]). - * Otherwise the "old" expanded record will be replace with the "new" one (aka. a :merge: aNew => aNew). - */ - mergeExpand(expand: _TygojaDict): void - } - interface Record { - /** - * FieldsData returns a shallow copy ONLY of the collection's fields record's data. - */ - fieldsData(): _TygojaDict - } - interface Record { - /** - * CustomData returns a shallow copy ONLY of the custom record fields data, - * aka. fields that are neither defined by the collection, nor special system ones. - * - * Note that custom fields prefixed with "@pbInternal" are always skipped. - */ - customData(): _TygojaDict - } - interface Record { - /** - * WithCustomData toggles the export/serialization of custom data fields - * (false by default). - */ - withCustomData(state: boolean): (Record) - } - interface Record { - /** - * IgnoreEmailVisibility toggles the flag to ignore the auth record email visibility check. - */ - ignoreEmailVisibility(state: boolean): (Record) - } - interface Record { - /** - * IgnoreUnchangedFields toggles the flag to ignore the unchanged fields - * from the DB export for the UPDATE SQL query. 
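A sketch contrasting the Record copy helpers above (original, fresh, clone, withCustomData); `record` is assumed to be an already loaded Record instance:

```
// Minimal sketch of the copy helpers declared above.
function snapshot(record: Record) {
  const before = record.original(); // state right after PostScan()
  const latest = record.fresh();    // latest data, expand and flags reset
  const full   = record.clone();    // keeps expand, flags and unknown fields

  // Also export the custom (non-collection) fields when serializing.
  full.withCustomData(true);

  return { before, latest, full };
}
```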
- * - * This could be used if you want to save only the record fields that you've changed - * without overwrite other untouched fields in case of concurrent update. - */ - ignoreUnchangedFields(state: boolean): (Record) - } - interface Record { - /** - * Set sets the provided key-value data pair into the current Record - * model directly as it is WITHOUT NORMALIZATIONS. - * - * See also [Record.Set]. - */ - setRaw(key: string, value: any): void - } - interface Record { - /** - * SetIfFieldExists sets the provided key-value data pair into the current Record model - * ONLY if key is existing Collection field name/modifier. - * - * This method does nothing if key is not a known Collection field name/modifier. - * - * On success returns the matched Field, otherwise - nil. - * - * To set any key-value, including custom/unknown fields, use the [Record.Set] method. - */ - setIfFieldExists(key: string, value: any): Field - } - interface Record { - /** - * Set sets the provided key-value data pair into the current Record model. - * - * If the record collection has field with name matching the provided "key", - * the value will be further normalized according to the field setter(s). - */ - set(key: string, value: any): void - } - interface Record { - getRaw(key: string): any - } - interface Record { - /** - * Get returns a normalized single record model data value for "key". - */ - get(key: string): any - } - interface Record { - /** - * Load bulk loads the provided data into the current Record model. - */ - load(data: _TygojaDict): void - } - interface Record { - /** - * GetBool returns the data value for "key" as a bool. - */ - getBool(key: string): boolean - } - interface Record { - /** - * GetString returns the data value for "key" as a string. - */ - getString(key: string): string - } - interface Record { - /** - * GetInt returns the data value for "key" as an int. - */ - getInt(key: string): number - } - interface Record { - /** - * GetFloat returns the data value for "key" as a float64. - */ - getFloat(key: string): number - } - interface Record { - /** - * GetDateTime returns the data value for "key" as a DateTime instance. - */ - getDateTime(key: string): types.DateTime - } - interface Record { - /** - * GetStringSlice returns the data value for "key" as a slice of non-zero unique strings. - */ - getStringSlice(key: string): Array - } - interface Record { - /** - * GetUploadedFiles returns the uploaded files for the provided "file" field key, - * (aka. the current [*filesytem.File] values) so that you can apply further - * validations or modifications (including changing the file name or content before persisting). - * - * Example: - * - * ``` - * files := record.GetUploadedFiles("documents") - * for _, f := range files { - * f.Name = "doc_" + f.Name // add a prefix to each file name - * } - * app.Save(record) // the files are pointers so the applied changes will transparently reflect on the record value - * ``` - */ - getUploadedFiles(key: string): Array<(filesystem.File | undefined)> - } - interface Record { - /** - * Retrieves the "key" json field value and unmarshals it into "result". - * - * Example - * - * ``` - * result := struct { - * FirstName string `json:"first_name"` - * }{} - * err := m.UnmarshalJSONField("my_field_name", &result) - * ``` - */ - unmarshalJSONField(key: string, result: any): void - } - interface Record { - /** - * ExpandedOne retrieves a single relation Record from the already - * loaded expand data of the current model. 
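The typed getters and setters above are easiest to see side by side. A minimal sketch, assuming `record` is an existing Record and that the field names used here exist in its collection:

```
// Sketch of the set/get family declared above; field names are illustrative.
function touchPost(record: Record) {
  record.set("title", "hello");         // normalized through the field setter
  record.setRaw("slug", "hello-world"); // stored as-is, no normalization

  const title  = record.getString("title");
  const views  = record.getInt("views");
  const active = record.getBool("active");
  const tags   = record.getStringSlice("tags");

  return { title, views, active, tags };
}
```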
- * - * If the requested expand relation is multiple, this method returns - * only first available Record from the expanded relation. - * - * Returns nil if there is no such expand relation loaded. - */ - expandedOne(relField: string): (Record) - } - interface Record { - /** - * ExpandedAll retrieves a slice of relation Records from the already - * loaded expand data of the current model. - * - * If the requested expand relation is single, this method normalizes - * the return result and will wrap the single model as a slice. - * - * Returns nil slice if there is no such expand relation loaded. - */ - expandedAll(relField: string): Array<(Record | undefined)> - } - interface Record { - /** - * FindFileFieldByFile returns the first file type field for which - * any of the record's data contains the provided filename. - */ - findFileFieldByFile(filename: string): (FileField) - } - interface Record { - /** - * DBExport implements the [DBExporter] interface and returns a key-value - * map with the data to be persisted when saving the Record in the database. - */ - dbExport(app: App): _TygojaDict - } - interface Record { - /** - * Hide hides the specified fields from the public safe serialization of the record. - */ - hide(...fieldNames: string[]): (Record) - } - interface Record { - /** - * Unhide forces to unhide the specified fields from the public safe serialization - * of the record (even when the collection field itself is marked as hidden). - */ - unhide(...fieldNames: string[]): (Record) - } - interface Record { - /** - * PublicExport exports only the record fields that are safe to be public. - * - * To export unknown data fields you need to set record.WithCustomData(true). - * - * For auth records, to force the export of the email field you need to set - * record.IgnoreEmailVisibility(true). - */ - publicExport(): _TygojaDict - } - interface Record { - /** - * MarshalJSON implements the [json.Marshaler] interface. - * - * Only the data exported by `PublicExport()` will be serialized. - */ - marshalJSON(): string|Array - } - interface Record { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(data: string|Array): void - } - interface Record { - /** - * ReplaceModifiers returns a new map with applied modifier - * values based on the current record and the specified data. - * - * The resolved modifier keys will be removed. - * - * Multiple modifiers will be applied one after another, - * while reusing the previous base key value result (ex. 1; -5; +2 => -2). - * - * Note that because Go doesn't guaranteed the iteration order of maps, - * we would explicitly apply shorter keys first for a more consistent and reproducible behavior. - * - * Example usage: - * - * ``` - * newData := record.ReplaceModifiers(data) - * // record: {"field": 10} - * // data: {"field+": 5} - * // result: {"field": 15} - * ``` - */ - replaceModifiers(data: _TygojaDict): _TygojaDict - } - interface Record { - /** - * Email returns the "email" record field value (usually available with Auth collections). - */ - email(): string - } - interface Record { - /** - * SetEmail sets the "email" record field value (usually available with Auth collections). - */ - setEmail(email: string): void - } - interface Record { - /** - * Verified returns the "emailVisibility" record field value (usually available with Auth collections). 
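A sketch combining the expand lookups and the public serialization helpers above; `record` and the relation/field names are illustrative assumptions:

```
// Minimal sketch of expandedOne/expandedAll, hide and publicExport.
function serializePost(record: Record) {
  const author   = record.expandedOne("author");   // single expanded relation (or nil)
  const comments = record.expandedAll("comments"); // always normalized to a slice

  record.hide("internalNotes"); // drop a field from the public-safe export
  const data = record.publicExport();

  return { data, authorId: author?.get("id"), commentsTotal: comments.length };
}
```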
- */ - emailVisibility(): boolean - } - interface Record { - /** - * SetEmailVisibility sets the "emailVisibility" record field value (usually available with Auth collections). - */ - setEmailVisibility(visible: boolean): void - } - interface Record { - /** - * Verified returns the "verified" record field value (usually available with Auth collections). - */ - verified(): boolean - } - interface Record { - /** - * SetVerified sets the "verified" record field value (usually available with Auth collections). - */ - setVerified(verified: boolean): void - } - interface Record { - /** - * TokenKey returns the "tokenKey" record field value (usually available with Auth collections). - */ - tokenKey(): string - } - interface Record { - /** - * SetTokenKey sets the "tokenKey" record field value (usually available with Auth collections). - */ - setTokenKey(key: string): void - } - interface Record { - /** - * RefreshTokenKey generates and sets a new random auth record "tokenKey". - */ - refreshTokenKey(): void - } - interface Record { - /** - * SetPassword sets the "password" record field value (usually available with Auth collections). - */ - setPassword(password: string): void - } - interface Record { - /** - * ValidatePassword validates a plain password against the "password" record field. - * - * Returns false if the password is incorrect. - */ - validatePassword(password: string): boolean - } - interface Record { - /** - * IsSuperuser returns whether the current record is a superuser, aka. - * whether the record is from the _superusers collection. - */ - isSuperuser(): boolean - } - interface Record { - /** - * NewStaticAuthToken generates and returns a new static record authentication token. - * - * Static auth tokens are similar to the regular auth tokens, but are - * non-refreshable and support custom duration. - * - * Zero or negative duration will fallback to the duration from the auth collection settings. - */ - newStaticAuthToken(duration: time.Duration): string - } - interface Record { - /** - * NewAuthToken generates and returns a new record authentication token. - */ - newAuthToken(): string - } - interface Record { - /** - * NewVerificationToken generates and returns a new record verification token. - */ - newVerificationToken(): string - } - interface Record { - /** - * NewPasswordResetToken generates and returns a new auth record password reset request token. - */ - newPasswordResetToken(): string - } - interface Record { - /** - * NewEmailChangeToken generates and returns a new auth record change email request token. - */ - newEmailChangeToken(newEmail: string): string - } - interface Record { - /** - * NewFileToken generates and returns a new record private file access token. - */ - newFileToken(): string + type _subhwPlm = Reader&Writer + interface ReadWriter extends _subhwPlm { } } @@ -13704,6 +9388,4314 @@ namespace cobra { } } +/** + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. + * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. + */ +namespace sql { + /** + * TxOptions holds the transaction options to be used in [DB.BeginTx]. + */ + interface TxOptions { + /** + * Isolation is the transaction isolation level. 
+ * If zero, the driver or database's default level is used. + */ + isolation: IsolationLevel + readOnly: boolean + } + /** + * DB is a database handle representing a pool of zero or more + * underlying connections. It's safe for concurrent use by multiple + * goroutines. + * + * The sql package creates and frees connections automatically; it + * also maintains a free pool of idle connections. If the database has + * a concept of per-connection state, such state can be reliably observed + * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the + * returned [Tx] is bound to a single connection. Once [Tx.Commit] or + * [Tx.Rollback] is called on the transaction, that transaction's + * connection is returned to [DB]'s idle connection pool. The pool size + * can be controlled with [DB.SetMaxIdleConns]. + */ + interface DB { + } + interface DB { + /** + * PingContext verifies a connection to the database is still alive, + * establishing a connection if necessary. + */ + pingContext(ctx: context.Context): void + } + interface DB { + /** + * Ping verifies a connection to the database is still alive, + * establishing a connection if necessary. + * + * Ping uses [context.Background] internally; to specify the context, use + * [DB.PingContext]. + */ + ping(): void + } + interface DB { + /** + * Close closes the database and prevents new queries from starting. + * Close then waits for all queries that have started processing on the server + * to finish. + * + * It is rare to Close a [DB], as the [DB] handle is meant to be + * long-lived and shared between many goroutines. + */ + close(): void + } + interface DB { + /** + * SetMaxIdleConns sets the maximum number of connections in the idle + * connection pool. + * + * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, + * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit. + * + * If n <= 0, no idle connections are retained. + * + * The default max idle connections is currently 2. This may change in + * a future release. + */ + setMaxIdleConns(n: number): void + } + interface DB { + /** + * SetMaxOpenConns sets the maximum number of open connections to the database. + * + * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than + * MaxIdleConns, then MaxIdleConns will be reduced to match the new + * MaxOpenConns limit. + * + * If n <= 0, then there is no limit on the number of open connections. + * The default is 0 (unlimited). + */ + setMaxOpenConns(n: number): void + } + interface DB { + /** + * SetConnMaxLifetime sets the maximum amount of time a connection may be reused. + * + * Expired connections may be closed lazily before reuse. + * + * If d <= 0, connections are not closed due to a connection's age. + */ + setConnMaxLifetime(d: time.Duration): void + } + interface DB { + /** + * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle. + * + * Expired connections may be closed lazily before reuse. + * + * If d <= 0, connections are not closed due to a connection's idle time. + */ + setConnMaxIdleTime(d: time.Duration): void + } + interface DB { + /** + * Stats returns database statistics. + */ + stats(): DBStats + } + interface DB { + /** + * PrepareContext creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. 
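A minimal sketch of the pool tuning setters above, assuming `db` is an sql.DB value obtained from the host application (how it is opened is outside this excerpt):

```
// Sketch of the connection pool setters declared above.
function tunePool(db: sql.DB) {
  db.setMaxOpenConns(10); // hard cap on simultaneously open connections
  db.setMaxIdleConns(5);  // kept <= max open conns by the package itself

  // setConnMaxLifetime/setConnMaxIdleTime take a time.Duration; constructing
  // one from the JSVM is host-specific, so it is omitted here.

  console.log(db.stats()); // snapshot of the pool statistics
}
```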
+ * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface DB { + /** + * Prepare creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * Prepare uses [context.Background] internally; to specify the context, use + * [DB.PrepareContext]. + */ + prepare(query: string): (Stmt) + } + interface DB { + /** + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface DB { + /** + * Exec executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + * + * Exec uses [context.Background] internally; to specify the context, use + * [DB.ExecContext]. + */ + exec(query: string, ...args: any[]): Result + } + interface DB { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface DB { + /** + * Query executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + * + * Query uses [context.Background] internally; to specify the context, use + * [DB.QueryContext]. + */ + query(query: string, ...args: any[]): (Rows) + } + interface DB { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface DB { + /** + * QueryRow executes a query that is expected to return at most one row. + * QueryRow always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, [*Row.Scan] scans the first selected row and discards + * the rest. + * + * QueryRow uses [context.Background] internally; to specify the context, use + * [DB.QueryRowContext]. + */ + queryRow(query: string, ...args: any[]): (Row) + } + interface DB { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface DB { + /** + * Begin starts a transaction. The default isolation level is dependent on + * the driver. + * + * Begin uses [context.Background] internally; to specify the context, use + * [DB.BeginTx]. 
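A small sketch of DB.exec from the declarations above. It assumes `db` as before and uses "?" placeholders in the SQLite style (placeholder syntax depends on the driver); the returned Result is documented further below:

```
// Sketch of a simple write; table and columns are illustrative.
function deactivateStaleUsers(db: sql.DB, cutoff: string): number {
  const res = db.exec(
    "UPDATE users SET active = ? WHERE last_login < ?",
    false,
    cutoff,
  );
  // rowsAffected() may not be supported by every driver (see Result below).
  return res.rowsAffected();
}
```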
+ */ + begin(): (Tx) + } + interface DB { + /** + * Driver returns the database's underlying driver. + */ + driver(): any + } + interface DB { + /** + * Conn returns a single connection by either opening a new connection + * or returning an existing connection from the connection pool. Conn will + * block until either a connection is returned or ctx is canceled. + * Queries run on the same Conn will be run in the same database session. + * + * Every Conn must be returned to the database pool after use by + * calling [Conn.Close]. + */ + conn(ctx: context.Context): (Conn) + } + /** + * Tx is an in-progress database transaction. + * + * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback]. + * + * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the + * transaction fail with [ErrTxDone]. + * + * The statements prepared for a transaction by calling + * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed + * by the call to [Tx.Commit] or [Tx.Rollback]. + */ + interface Tx { + } + interface Tx { + /** + * Commit commits the transaction. + */ + commit(): void + } + interface Tx { + /** + * Rollback aborts the transaction. + */ + rollback(): void + } + interface Tx { + /** + * PrepareContext creates a prepared statement for use within a transaction. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * To use an existing prepared statement on this transaction, see [Tx.Stmt]. + * + * The provided context will be used for the preparation of the context, not + * for the execution of the returned statement. The returned statement + * will run in the transaction context. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Tx { + /** + * Prepare creates a prepared statement for use within a transaction. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * To use an existing prepared statement on this transaction, see [Tx.Stmt]. + * + * Prepare uses [context.Background] internally; to specify the context, use + * [Tx.PrepareContext]. + */ + prepare(query: string): (Stmt) + } + interface Tx { + /** + * StmtContext returns a transaction-specific prepared statement from + * an existing statement. + * + * Example: + * + * ``` + * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") + * ... + * tx, err := db.Begin() + * ... + * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203) + * ``` + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + */ + stmtContext(ctx: context.Context, stmt: Stmt): (Stmt) + } + interface Tx { + /** + * Stmt returns a transaction-specific prepared statement from + * an existing statement. + * + * Example: + * + * ``` + * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") + * ... + * tx, err := db.Begin() + * ... + * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203) + * ``` + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * Stmt uses [context.Background] internally; to specify the context, use + * [Tx.StmtContext]. 
+ */ + stmt(stmt: Stmt): (Stmt) + } + interface Tx { + /** + * ExecContext executes a query that doesn't return rows. + * For example: an INSERT and UPDATE. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Tx { + /** + * Exec executes a query that doesn't return rows. + * For example: an INSERT and UPDATE. + * + * Exec uses [context.Background] internally; to specify the context, use + * [Tx.ExecContext]. + */ + exec(query: string, ...args: any[]): Result + } + interface Tx { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Tx { + /** + * Query executes a query that returns rows, typically a SELECT. + * + * Query uses [context.Background] internally; to specify the context, use + * [Tx.QueryContext]. + */ + query(query: string, ...args: any[]): (Rows) + } + interface Tx { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Tx { + /** + * QueryRow executes a query that is expected to return at most one row. + * QueryRow always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + * + * QueryRow uses [context.Background] internally; to specify the context, use + * [Tx.QueryRowContext]. + */ + queryRow(query: string, ...args: any[]): (Row) + } + /** + * Stmt is a prepared statement. + * A Stmt is safe for concurrent use by multiple goroutines. + * + * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single + * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will + * become unusable and all operations will return an error. + * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the + * [DB]. When the Stmt needs to execute on a new underlying connection, it will + * prepare itself on the new connection automatically. + */ + interface Stmt { + } + interface Stmt { + /** + * ExecContext executes a prepared statement with the given arguments and + * returns a [Result] summarizing the effect of the statement. + */ + execContext(ctx: context.Context, ...args: any[]): Result + } + interface Stmt { + /** + * Exec executes a prepared statement with the given arguments and + * returns a [Result] summarizing the effect of the statement. + * + * Exec uses [context.Background] internally; to specify the context, use + * [Stmt.ExecContext]. + */ + exec(...args: any[]): Result + } + interface Stmt { + /** + * QueryContext executes a prepared query statement with the given arguments + * and returns the query results as a [*Rows]. + */ + queryContext(ctx: context.Context, ...args: any[]): (Rows) + } + interface Stmt { + /** + * Query executes a prepared query statement with the given arguments + * and returns the query results as a *Rows. + * + * Query uses [context.Background] internally; to specify the context, use + * [Stmt.QueryContext]. 
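A sketch of the usual transaction flow with begin, Tx.exec, commit and rollback from the declarations above; `db`, the table and the placeholder style are assumptions:

```
// Sketch of a transaction; roll back on any failure, otherwise commit.
function transferCredit(db: sql.DB, from: string, to: string, amount: number) {
  const tx = db.begin();
  try {
    tx.exec("UPDATE accounts SET balance = balance - ? WHERE id = ?", amount, from);
    tx.exec("UPDATE accounts SET balance = balance + ? WHERE id = ?", amount, to);
    tx.commit();
  } catch (err) {
    tx.rollback(); // after rollback, further operations on tx fail with ErrTxDone
    throw err;
  }
}
```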
+ */ + query(...args: any[]): (Rows) + } + interface Stmt { + /** + * QueryRowContext executes a prepared query statement with the given arguments. + * If an error occurs during the execution of the statement, that error will + * be returned by a call to Scan on the returned [*Row], which is always non-nil. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, ...args: any[]): (Row) + } + interface Stmt { + /** + * QueryRow executes a prepared query statement with the given arguments. + * If an error occurs during the execution of the statement, that error will + * be returned by a call to Scan on the returned [*Row], which is always non-nil. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + * + * Example usage: + * + * ``` + * var name string + * err := nameByUseridStmt.QueryRow(id).Scan(&name) + * ``` + * + * QueryRow uses [context.Background] internally; to specify the context, use + * [Stmt.QueryRowContext]. + */ + queryRow(...args: any[]): (Row) + } + interface Stmt { + /** + * Close closes the statement. + */ + close(): void + } + /** + * Rows is the result of a query. Its cursor starts before the first row + * of the result set. Use [Rows.Next] to advance from row to row. + */ + interface Rows { + } + interface Rows { + /** + * Next prepares the next result row for reading with the [Rows.Scan] method. It + * returns true on success, or false if there is no next result row or an error + * happened while preparing it. [Rows.Err] should be consulted to distinguish between + * the two cases. + * + * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next]. + */ + next(): boolean + } + interface Rows { + /** + * NextResultSet prepares the next result set for reading. It reports whether + * there is further result sets, or false if there is no further result set + * or if there is an error advancing to it. The [Rows.Err] method should be consulted + * to distinguish between the two cases. + * + * After calling NextResultSet, the [Rows.Next] method should always be called before + * scanning. If there are further result sets they may not have rows in the result + * set. + */ + nextResultSet(): boolean + } + interface Rows { + /** + * Err returns the error, if any, that was encountered during iteration. + * Err may be called after an explicit or implicit [Rows.Close]. + */ + err(): void + } + interface Rows { + /** + * Columns returns the column names. + * Columns returns an error if the rows are closed. + */ + columns(): Array + } + interface Rows { + /** + * ColumnTypes returns column information such as column type, length, + * and nullable. Some information may not be available from some drivers. + */ + columnTypes(): Array<(ColumnType | undefined)> + } + interface Rows { + /** + * Scan copies the columns in the current row into the values pointed + * at by dest. The number of values in dest must be the same as the + * number of columns in [Rows]. 
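A sketch reusing a prepared statement via the Stmt methods above (prepare, exec, close); `db` and the SQL are illustrative assumptions:

```
// Sketch of preparing a statement once and executing it repeatedly.
function insertTags(db: sql.DB, postId: string, tags: Array<string>) {
  const stmt = db.prepare("INSERT INTO post_tags (post_id, tag) VALUES (?, ?)");
  try {
    for (const tag of tags) {
      stmt.exec(postId, tag); // the same statement can be executed many times
    }
  } finally {
    stmt.close(); // release the statement when it is no longer needed
  }
}
```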
+ * + * Scan converts columns read from the database into the following + * common Go types and special types provided by the sql package: + * + * ``` + * *string + * *[]byte + * *int, *int8, *int16, *int32, *int64 + * *uint, *uint8, *uint16, *uint32, *uint64 + * *bool + * *float32, *float64 + * *interface{} + * *RawBytes + * *Rows (cursor value) + * any type implementing Scanner (see Scanner docs) + * ``` + * + * In the most simple case, if the type of the value from the source + * column is an integer, bool or string type T and dest is of type *T, + * Scan simply assigns the value through the pointer. + * + * Scan also converts between string and numeric types, as long as no + * information would be lost. While Scan stringifies all numbers + * scanned from numeric database columns into *string, scans into + * numeric types are checked for overflow. For example, a float64 with + * value 300 or a string with value "300" can scan into a uint16, but + * not into a uint8, though float64(255) or "255" can scan into a + * uint8. One exception is that scans of some float64 numbers to + * strings may lose information when stringifying. In general, scan + * floating point columns into *float64. + * + * If a dest argument has type *[]byte, Scan saves in that argument a + * copy of the corresponding data. The copy is owned by the caller and + * can be modified and held indefinitely. The copy can be avoided by + * using an argument of type [*RawBytes] instead; see the documentation + * for [RawBytes] for restrictions on its use. + * + * If an argument has type *interface{}, Scan copies the value + * provided by the underlying driver without conversion. When scanning + * from a source value of type []byte to *interface{}, a copy of the + * slice is made and the caller owns the result. + * + * Source values of type [time.Time] may be scanned into values of type + * *time.Time, *interface{}, *string, or *[]byte. When converting to + * the latter two, [time.RFC3339Nano] is used. + * + * Source values of type bool may be scanned into types *bool, + * *interface{}, *string, *[]byte, or [*RawBytes]. + * + * For scanning into *bool, the source may be true, false, 1, 0, or + * string inputs parseable by [strconv.ParseBool]. + * + * Scan can also convert a cursor returned from a query, such as + * "select cursor(select * from my_table) from dual", into a + * [*Rows] value that can itself be scanned from. The parent + * select query will close any cursor [*Rows] if the parent [*Rows] is closed. + * + * If any of the first arguments implementing [Scanner] returns an error, + * that error will be wrapped in the returned error. + */ + scan(...dest: any[]): void + } + interface Rows { + /** + * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called + * and returns false and there are no further result sets, + * the [Rows] are closed automatically and it will suffice to check the + * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err]. + */ + close(): void + } + /** + * A Result summarizes an executed SQL command. + */ + interface Result { + [key:string]: any; + /** + * LastInsertId returns the integer generated by the database + * in response to a command. Typically this will be from an + * "auto increment" column when inserting a new row. Not all + * databases support this feature, and the syntax of such + * statements varies. + */ + lastInsertId(): number + /** + * RowsAffected returns the number of rows affected by an + * update, insert, or delete. 
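A sketch of the Rows iteration pattern above. It deliberately avoids Rows.scan, since passing scan destinations from the JSVM is host-specific; `db` and the query are assumptions:

```
// Sketch of the Next/Err/Close iteration contract declared above.
function countActivePosts(db: sql.DB): number {
  const rows = db.query("SELECT id FROM posts WHERE active = ?", true);
  try {
    let total = 0;
    while (rows.next()) { // must be called before reading each row
      total++;
    }
    rows.err(); // reports an error that may have ended the iteration early
    return total;
  } finally {
    rows.close(); // idempotent, safe even after next() has returned false
  }
}
```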
Not every database or database + * driver may support this. + */ + rowsAffected(): number + } +} + +/** + * Package multipart implements MIME multipart parsing, as defined in RFC + * 2046. + * + * The implementation is sufficient for HTTP (RFC 2388) and the multipart + * bodies generated by popular browsers. + * + * # Limits + * + * To protect against malicious inputs, this package sets limits on the size + * of the MIME data it processes. + * + * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a + * part to 10000 and [Reader.ReadForm] limits the total number of headers in all + * FileHeaders to 10000. + * These limits may be adjusted with the GODEBUG=multipartmaxheaders= + * setting. + * + * Reader.ReadForm further limits the number of parts in a form to 1000. + * This limit may be adjusted with the GODEBUG=multipartmaxparts= + * setting. + */ +namespace multipart { + /** + * A FileHeader describes a file part of a multipart request. + */ + interface FileHeader { + filename: string + header: textproto.MIMEHeader + size: number + } + interface FileHeader { + /** + * Open opens and returns the [FileHeader]'s associated File. + */ + open(): File + } +} + +/** + * Package http provides HTTP client and server implementations. + * + * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: + * + * ``` + * resp, err := http.Get("http://example.com/") + * ... + * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + * ... + * resp, err := http.PostForm("http://example.com/form", + * url.Values{"key": {"Value"}, "id": {"123"}}) + * ``` + * + * The caller must close the response body when finished with it: + * + * ``` + * resp, err := http.Get("http://example.com/") + * if err != nil { + * // handle error + * } + * defer resp.Body.Close() + * body, err := io.ReadAll(resp.Body) + * // ... + * ``` + * + * # Clients and Transports + * + * For control over HTTP client headers, redirect policy, and other + * settings, create a [Client]: + * + * ``` + * client := &http.Client{ + * CheckRedirect: redirectPolicyFunc, + * } + * + * resp, err := client.Get("http://example.com") + * // ... + * + * req, err := http.NewRequest("GET", "http://example.com", nil) + * // ... + * req.Header.Add("If-None-Match", `W/"wyzzy"`) + * resp, err := client.Do(req) + * // ... + * ``` + * + * For control over proxies, TLS configuration, keep-alives, + * compression, and other settings, create a [Transport]: + * + * ``` + * tr := &http.Transport{ + * MaxIdleConns: 10, + * IdleConnTimeout: 30 * time.Second, + * DisableCompression: true, + * } + * client := &http.Client{Transport: tr} + * resp, err := client.Get("https://example.com") + * ``` + * + * Clients and Transports are safe for concurrent use by multiple + * goroutines and for efficiency should only be created once and re-used. + * + * # Servers + * + * ListenAndServe starts an HTTP server with a given address and handler. + * The handler is usually nil, which means to use [DefaultServeMux]. 
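A small sketch of the multipart.FileHeader declared above; `fh` is assumed to come from an already parsed multipart form, and the close() call on the opened File mirrors Go's multipart.File rather than anything declared in this excerpt:

```
// Sketch of FileHeader usage; close() on the opened File is an assumption.
function describeUpload(fh: multipart.FileHeader): string {
  const f = fh.open(); // the FileHeader's associated File
  try {
    return `${fh.filename} (${fh.size} bytes)`;
  } finally {
    f.close(); // assumed to exist, as in Go's multipart.File
  }
}
```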
+ * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: + * + * ``` + * http.Handle("/foo", fooHandler) + * + * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + * }) + * + * log.Fatal(http.ListenAndServe(":8080", nil)) + * ``` + * + * More control over the server's behavior is available by creating a + * custom Server: + * + * ``` + * s := &http.Server{ + * Addr: ":8080", + * Handler: myHandler, + * ReadTimeout: 10 * time.Second, + * WriteTimeout: 10 * time.Second, + * MaxHeaderBytes: 1 << 20, + * } + * log.Fatal(s.ListenAndServe()) + * ``` + * + * # HTTP/2 + * + * Starting with Go 1.6, the http package has transparent support for the + * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 + * can do so by setting [Transport.TLSNextProto] (for clients) or + * [Server.TLSNextProto] (for servers) to a non-nil, empty + * map. Alternatively, the following GODEBUG settings are + * currently supported: + * + * ``` + * GODEBUG=http2client=0 # disable HTTP/2 client support + * GODEBUG=http2server=0 # disable HTTP/2 server support + * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs + * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps + * ``` + * + * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug + * + * The http package's [Transport] and [Server] both automatically enable + * HTTP/2 support for simple configurations. To enable HTTP/2 for more + * complex configurations, to use lower-level HTTP/2 features, or to use + * a newer version of Go's http2 package, import "golang.org/x/net/http2" + * directly and use its ConfigureTransport and/or ConfigureServer + * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 + * package takes precedence over the net/http package's built-in HTTP/2 + * support. + */ +namespace http { + // @ts-ignore + import mathrand = rand + /** + * PushOptions describes options for [Pusher.Push]. + */ + interface PushOptions { + /** + * Method specifies the HTTP method for the promised request. + * If set, it must be "GET" or "HEAD". Empty means "GET". + */ + method: string + /** + * Header specifies additional promised request headers. This cannot + * include HTTP/2 pseudo header fields like ":path" and ":scheme", + * which will be added automatically. + */ + header: Header + } + // @ts-ignore + import urlpkg = url + /** + * A Request represents an HTTP request received by a server + * or to be sent by a client. + * + * The field semantics differ slightly between client and server + * usage. In addition to the notes on the fields below, see the + * documentation for [Request.Write] and [RoundTripper]. + */ + interface Request { + /** + * Method specifies the HTTP method (GET, POST, PUT, etc.). + * For client requests, an empty string means GET. + */ + method: string + /** + * URL specifies either the URI being requested (for server + * requests) or the URL to access (for client requests). + * + * For server requests, the URL is parsed from the URI + * supplied on the Request-Line as stored in RequestURI. For + * most requests, fields other than Path and RawQuery will be + * empty. (See RFC 7230, Section 5.3) + * + * For client requests, the URL's Host specifies the server to + * connect to, while the Request's Host field optionally + * specifies the Host header value to send in the HTTP + * request. + */ + url?: url.URL + /** + * The protocol version for incoming server requests. 
+ * + * For client requests, these fields are ignored. The HTTP + * client code always uses either HTTP/1.1 or HTTP/2. + * See the docs on Transport for details. + */ + proto: string // "HTTP/1.0" + protoMajor: number // 1 + protoMinor: number // 0 + /** + * Header contains the request header fields either received + * by the server or to be sent by the client. + * + * If a server received a request with header lines, + * + * ``` + * Host: example.com + * accept-encoding: gzip, deflate + * Accept-Language: en-us + * fOO: Bar + * foo: two + * ``` + * + * then + * + * ``` + * Header = map[string][]string{ + * "Accept-Encoding": {"gzip, deflate"}, + * "Accept-Language": {"en-us"}, + * "Foo": {"Bar", "two"}, + * } + * ``` + * + * For incoming requests, the Host header is promoted to the + * Request.Host field and removed from the Header map. + * + * HTTP defines that header names are case-insensitive. The + * request parser implements this by using CanonicalHeaderKey, + * making the first character and any characters following a + * hyphen uppercase and the rest lowercase. + * + * For client requests, certain headers such as Content-Length + * and Connection are automatically written when needed and + * values in Header may be ignored. See the documentation + * for the Request.Write method. + */ + header: Header + /** + * Body is the request's body. + * + * For client requests, a nil body means the request has no + * body, such as a GET request. The HTTP Client's Transport + * is responsible for calling the Close method. + * + * For server requests, the Request Body is always non-nil + * but will return EOF immediately when no body is present. + * The Server will close the request body. The ServeHTTP + * Handler does not need to. + * + * Body must allow Read to be called concurrently with Close. + * In particular, calling Close should unblock a Read waiting + * for input. + */ + body: io.ReadCloser + /** + * GetBody defines an optional func to return a new copy of + * Body. It is used for client requests when a redirect requires + * reading the body more than once. Use of GetBody still + * requires setting Body. + * + * For server requests, it is unused. + */ + getBody: () => io.ReadCloser + /** + * ContentLength records the length of the associated content. + * The value -1 indicates that the length is unknown. + * Values >= 0 indicate that the given number of bytes may + * be read from Body. + * + * For client requests, a value of 0 with a non-nil Body is + * also treated as unknown. + */ + contentLength: number + /** + * TransferEncoding lists the transfer encodings from outermost to + * innermost. An empty list denotes the "identity" encoding. + * TransferEncoding can usually be ignored; chunked encoding is + * automatically added and removed as necessary when sending and + * receiving requests. + */ + transferEncoding: Array + /** + * Close indicates whether to close the connection after + * replying to this request (for servers) or after sending this + * request and reading its response (for clients). + * + * For server requests, the HTTP server handles this automatically + * and this field is not needed by Handlers. + * + * For client requests, setting this field prevents re-use of + * TCP connections between requests to the same hosts, as if + * Transport.DisableKeepAlives were set. + */ + close: boolean + /** + * For server requests, Host specifies the host on which the + * URL is sought. 
For HTTP/1 (per RFC 7230, section 5.4), this + * is either the value of the "Host" header or the host name + * given in the URL itself. For HTTP/2, it is the value of the + * ":authority" pseudo-header field. + * It may be of the form "host:port". For international domain + * names, Host may be in Punycode or Unicode form. Use + * golang.org/x/net/idna to convert it to either format if + * needed. + * To prevent DNS rebinding attacks, server Handlers should + * validate that the Host header has a value for which the + * Handler considers itself authoritative. The included + * ServeMux supports patterns registered to particular host + * names and thus protects its registered Handlers. + * + * For client requests, Host optionally overrides the Host + * header to send. If empty, the Request.Write method uses + * the value of URL.Host. Host may contain an international + * domain name. + */ + host: string + /** + * Form contains the parsed form data, including both the URL + * field's query parameters and the PATCH, POST, or PUT form data. + * This field is only available after ParseForm is called. + * The HTTP client ignores Form and uses Body instead. + */ + form: url.Values + /** + * PostForm contains the parsed form data from PATCH, POST + * or PUT body parameters. + * + * This field is only available after ParseForm is called. + * The HTTP client ignores PostForm and uses Body instead. + */ + postForm: url.Values + /** + * MultipartForm is the parsed multipart form, including file uploads. + * This field is only available after ParseMultipartForm is called. + * The HTTP client ignores MultipartForm and uses Body instead. + */ + multipartForm?: multipart.Form + /** + * Trailer specifies additional headers that are sent after the request + * body. + * + * For server requests, the Trailer map initially contains only the + * trailer keys, with nil values. (The client declares which trailers it + * will later send.) While the handler is reading from Body, it must + * not reference Trailer. After reading from Body returns EOF, Trailer + * can be read again and will contain non-nil values, if they were sent + * by the client. + * + * For client requests, Trailer must be initialized to a map containing + * the trailer keys to later send. The values may be nil or their final + * values. The ContentLength must be 0 or -1, to send a chunked request. + * After the HTTP request is sent the map values can be updated while + * the request body is read. Once the body returns EOF, the caller must + * not mutate Trailer. + * + * Few HTTP clients, servers, or proxies support HTTP trailers. + */ + trailer: Header + /** + * RemoteAddr allows HTTP servers and other software to record + * the network address that sent the request, usually for + * logging. This field is not filled in by ReadRequest and + * has no defined format. The HTTP server in this package + * sets RemoteAddr to an "IP:port" address before invoking a + * handler. + * This field is ignored by the HTTP client. + */ + remoteAddr: string + /** + * RequestURI is the unmodified request-target of the + * Request-Line (RFC 7230, Section 3.1.1) as sent by the client + * to a server. Usually the URL field should be used instead. + * It is an error to set this field in an HTTP client request. + */ + requestURI: string + /** + * TLS allows HTTP servers and other software to record + * information about the TLS connection on which the request + * was received. This field is not filled in by ReadRequest. 
+ * The HTTP server in this package sets the field for + * TLS-enabled connections before invoking a handler; + * otherwise it leaves the field nil. + * This field is ignored by the HTTP client. + */ + tls?: any + /** + * Cancel is an optional channel whose closure indicates that the client + * request should be regarded as canceled. Not all implementations of + * RoundTripper may support Cancel. + * + * For server requests, this field is not applicable. + * + * Deprecated: Set the Request's context with NewRequestWithContext + * instead. If a Request's Cancel field and context are both + * set, it is undefined whether Cancel is respected. + */ + cancel: undefined + /** + * Response is the redirect response which caused this request + * to be created. This field is only populated during client + * redirects. + */ + response?: Response + /** + * Pattern is the [ServeMux] pattern that matched the request. + * It is empty if the request was not matched against a pattern. + */ + pattern: string + } + interface Request { + /** + * Context returns the request's context. To change the context, use + * [Request.Clone] or [Request.WithContext]. + * + * The returned context is always non-nil; it defaults to the + * background context. + * + * For outgoing client requests, the context controls cancellation. + * + * For incoming server requests, the context is canceled when the + * client's connection closes, the request is canceled (with HTTP/2), + * or when the ServeHTTP method returns. + */ + context(): context.Context + } + interface Request { + /** + * WithContext returns a shallow copy of r with its context changed + * to ctx. The provided ctx must be non-nil. + * + * For outgoing client request, the context controls the entire + * lifetime of a request and its response: obtaining a connection, + * sending the request, and reading the response headers and body. + * + * To create a new request with a context, use [NewRequestWithContext]. + * To make a deep copy of a request with a new context, use [Request.Clone]. + */ + withContext(ctx: context.Context): (Request) + } + interface Request { + /** + * Clone returns a deep copy of r with its context changed to ctx. + * The provided ctx must be non-nil. + * + * Clone only makes a shallow copy of the Body field. + * + * For an outgoing client request, the context controls the entire + * lifetime of a request and its response: obtaining a connection, + * sending the request, and reading the response headers and body. + */ + clone(ctx: context.Context): (Request) + } + interface Request { + /** + * ProtoAtLeast reports whether the HTTP protocol used + * in the request is at least major.minor. + */ + protoAtLeast(major: number, minor: number): boolean + } + interface Request { + /** + * UserAgent returns the client's User-Agent, if sent in the request. + */ + userAgent(): string + } + interface Request { + /** + * Cookies parses and returns the HTTP cookies sent with the request. + */ + cookies(): Array<(Cookie | undefined)> + } + interface Request { + /** + * CookiesNamed parses and returns the named HTTP cookies sent with the request + * or an empty slice if none matched. + */ + cookiesNamed(name: string): Array<(Cookie | undefined)> + } + interface Request { + /** + * Cookie returns the named cookie provided in the request or + * [ErrNoCookie] if not found. + * If multiple cookies match the given name, only one cookie will + * be returned. 
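+ *
+ * Because a missing cookie is reported as an error, JavaScript callers will
+ * typically guard the call (a minimal sketch; `request` is assumed, Go errors
+ * are assumed to surface as thrown exceptions, and the generated Cookie type
+ * is assumed to expose Go's Value field as `value`):
+ *
+ * ```
+ * let session = ""
+ * try {
+ *     session = request.cookie("session").value
+ * } catch (err) {
+ *     // no "session" cookie was sent with the request
+ * }
+ * ```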
+ */ + cookie(name: string): (Cookie) + } + interface Request { + /** + * AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, + * AddCookie does not attach more than one [Cookie] header field. That + * means all cookies, if any, are written into the same line, + * separated by semicolon. + * AddCookie only sanitizes c's name and value, and does not sanitize + * a Cookie header already present in the request. + */ + addCookie(c: Cookie): void + } + interface Request { + /** + * Referer returns the referring URL, if sent in the request. + * + * Referer is misspelled as in the request itself, a mistake from the + * earliest days of HTTP. This value can also be fetched from the + * [Header] map as Header["Referer"]; the benefit of making it available + * as a method is that the compiler can diagnose programs that use the + * alternate (correct English) spelling req.Referrer() but cannot + * diagnose programs that use Header["Referrer"]. + */ + referer(): string + } + interface Request { + /** + * MultipartReader returns a MIME multipart reader if this is a + * multipart/form-data or a multipart/mixed POST request, else returns nil and an error. + * Use this function instead of [Request.ParseMultipartForm] to + * process the request body as a stream. + */ + multipartReader(): (multipart.Reader) + } + interface Request { + /** + * Write writes an HTTP/1.1 request, which is the header and body, in wire format. + * This method consults the following fields of the request: + * + * ``` + * Host + * URL + * Method (defaults to "GET") + * Header + * ContentLength + * TransferEncoding + * Body + * ``` + * + * If Body is present, Content-Length is <= 0 and [Request.TransferEncoding] + * hasn't been set to "identity", Write adds "Transfer-Encoding: + * chunked" to the header. Body is closed after it is sent. + */ + write(w: io.Writer): void + } + interface Request { + /** + * WriteProxy is like [Request.Write] but writes the request in the form + * expected by an HTTP proxy. In particular, [Request.WriteProxy] writes the + * initial Request-URI line of the request with an absolute URI, per + * section 5.3 of RFC 7230, including the scheme and host. + * In either case, WriteProxy also writes a Host header, using + * either r.Host or r.URL.Host. + */ + writeProxy(w: io.Writer): void + } + interface Request { + /** + * BasicAuth returns the username and password provided in the request's + * Authorization header, if the request uses HTTP Basic Authentication. + * See RFC 2617, Section 2. + */ + basicAuth(): [string, boolean] + } + interface Request { + /** + * SetBasicAuth sets the request's Authorization header to use HTTP + * Basic Authentication with the provided username and password. + * + * With HTTP Basic Authentication the provided username and password + * are not encrypted. It should generally only be used in an HTTPS + * request. + * + * The username may not contain a colon. Some protocols may impose + * additional requirements on pre-escaping the username and + * password. For instance, when used with OAuth2, both arguments must + * be URL encoded first with [url.QueryEscape]. + */ + setBasicAuth(username: string, password: string): void + } + interface Request { + /** + * ParseForm populates r.Form and r.PostForm. + * + * For all requests, ParseForm parses the raw query from the URL and updates + * r.Form. + * + * For POST, PUT, and PATCH requests, it also reads the request body, parses it + * as a form and puts the results into both r.PostForm and r.Form. 
Request body + * parameters take precedence over URL query string values in r.Form. + * + * If the request Body's size has not already been limited by [MaxBytesReader], + * the size is capped at 10MB. + * + * For other HTTP methods, or when the Content-Type is not + * application/x-www-form-urlencoded, the request Body is not read, and + * r.PostForm is initialized to a non-nil, empty value. + * + * [Request.ParseMultipartForm] calls ParseForm automatically. + * ParseForm is idempotent. + */ + parseForm(): void + } + interface Request { + /** + * ParseMultipartForm parses a request body as multipart/form-data. + * The whole request body is parsed and up to a total of maxMemory bytes of + * its file parts are stored in memory, with the remainder stored on + * disk in temporary files. + * ParseMultipartForm calls [Request.ParseForm] if necessary. + * If ParseForm returns an error, ParseMultipartForm returns it but also + * continues parsing the request body. + * After one call to ParseMultipartForm, subsequent calls have no effect. + */ + parseMultipartForm(maxMemory: number): void + } + interface Request { + /** + * FormValue returns the first value for the named component of the query. + * The precedence order: + * 1. application/x-www-form-urlencoded form body (POST, PUT, PATCH only) + * 2. query parameters (always) + * 3. multipart/form-data form body (always) + * + * FormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] + * if necessary and ignores any errors returned by these functions. + * If key is not present, FormValue returns the empty string. + * To access multiple values of the same key, call ParseForm and + * then inspect [Request.Form] directly. + */ + formValue(key: string): string + } + interface Request { + /** + * PostFormValue returns the first value for the named component of the POST, + * PUT, or PATCH request body. URL query parameters are ignored. + * PostFormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary and ignores + * any errors returned by these functions. + * If key is not present, PostFormValue returns the empty string. + */ + postFormValue(key: string): string + } + interface Request { + /** + * FormFile returns the first file for the provided form key. + * FormFile calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary. + */ + formFile(key: string): [multipart.File, (multipart.FileHeader)] + } + interface Request { + /** + * PathValue returns the value for the named path wildcard in the [ServeMux] pattern + * that matched the request. + * It returns the empty string if the request was not matched against a pattern + * or there is no such wildcard in the pattern. + */ + pathValue(name: string): string + } + interface Request { + /** + * SetPathValue sets name to value, so that subsequent calls to r.PathValue(name) + * return value. + */ + setPathValue(name: string, value: string): void + } + /** + * A Handler responds to an HTTP request. + * + * [Handler.ServeHTTP] should write reply headers and data to the [ResponseWriter] + * and then return. Returning signals that the request is finished; it + * is not valid to use the [ResponseWriter] or read from the + * [Request.Body] after or concurrently with the completion of the + * ServeHTTP call. + * + * Depending on the HTTP client software, HTTP protocol version, and + * any intermediaries between the client and the Go server, it may not + * be possible to read from the [Request.Body] after writing to the + * [ResponseWriter]. 
Cautious handlers should read the [Request.Body] + * first, and then reply. + * + * Except for reading the body, handlers should not modify the + * provided Request. + * + * If ServeHTTP panics, the server (the caller of ServeHTTP) assumes + * that the effect of the panic was isolated to the active request. + * It recovers the panic, logs a stack trace to the server error log, + * and either closes the network connection or sends an HTTP/2 + * RST_STREAM, depending on the HTTP protocol. To abort a handler so + * the client sees an interrupted response but the server doesn't log + * an error, panic with the value [ErrAbortHandler]. + */ + interface Handler { + [key:string]: any; + serveHTTP(_arg0: ResponseWriter, _arg1: Request): void + } + /** + * A ResponseWriter interface is used by an HTTP handler to + * construct an HTTP response. + * + * A ResponseWriter may not be used after [Handler.ServeHTTP] has returned. + */ + interface ResponseWriter { + [key:string]: any; + /** + * Header returns the header map that will be sent by + * [ResponseWriter.WriteHeader]. The [Header] map also is the mechanism with which + * [Handler] implementations can set HTTP trailers. + * + * Changing the header map after a call to [ResponseWriter.WriteHeader] (or + * [ResponseWriter.Write]) has no effect unless the HTTP status code was of the + * 1xx class or the modified headers are trailers. + * + * There are two ways to set Trailers. The preferred way is to + * predeclare in the headers which trailers you will later + * send by setting the "Trailer" header to the names of the + * trailer keys which will come later. In this case, those + * keys of the Header map are treated as if they were + * trailers. See the example. The second way, for trailer + * keys not known to the [Handler] until after the first [ResponseWriter.Write], + * is to prefix the [Header] map keys with the [TrailerPrefix] + * constant value. + * + * To suppress automatic response headers (such as "Date"), set + * their value to nil. + */ + header(): Header + /** + * Write writes the data to the connection as part of an HTTP reply. + * + * If [ResponseWriter.WriteHeader] has not yet been called, Write calls + * WriteHeader(http.StatusOK) before writing the data. If the Header + * does not contain a Content-Type line, Write adds a Content-Type set + * to the result of passing the initial 512 bytes of written data to + * [DetectContentType]. Additionally, if the total size of all written + * data is under a few KB and there are no Flush calls, the + * Content-Length header is added automatically. + * + * Depending on the HTTP protocol version and the client, calling + * Write or WriteHeader may prevent future reads on the + * Request.Body. For HTTP/1.x requests, handlers should read any + * needed request body data before writing the response. Once the + * headers have been flushed (due to either an explicit Flusher.Flush + * call or writing enough data to trigger a flush), the request body + * may be unavailable. For HTTP/2 requests, the Go HTTP server permits + * handlers to continue to read the request body while concurrently + * writing the response. However, such behavior may not be supported + * by all HTTP/2 clients. Handlers should read before writing if + * possible to maximize compatibility. + */ + write(_arg0: string|Array): number + /** + * WriteHeader sends an HTTP response header with the provided + * status code. 
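+ *
+ * A typical ordering, shown as a rough JavaScript-side sketch (`w` is an
+ * assumed ResponseWriter and the Header binding is assumed to expose Go's
+ * Header.Set as `set`):
+ *
+ * ```
+ * w.header().set("Content-Type", "application/json")
+ * w.writeHeader(404)                  // headers must be set before this call
+ * w.write('{"error":"not found"}')
+ * ```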
+ * + * If WriteHeader is not called explicitly, the first call to Write + * will trigger an implicit WriteHeader(http.StatusOK). + * Thus explicit calls to WriteHeader are mainly used to + * send error codes or 1xx informational responses. + * + * The provided code must be a valid HTTP 1xx-5xx status code. + * Any number of 1xx headers may be written, followed by at most + * one 2xx-5xx header. 1xx headers are sent immediately, but 2xx-5xx + * headers may be buffered. Use the Flusher interface to send + * buffered data. The header map is cleared when 2xx-5xx headers are + * sent, but not with 1xx headers. + * + * The server will automatically send a 100 (Continue) header + * on the first read from the request body if the request has + * an "Expect: 100-continue" header. + */ + writeHeader(statusCode: number): void + } +} + +/** + * Package exec runs external commands. It wraps os.StartProcess to make it + * easier to remap stdin and stdout, connect I/O with pipes, and do other + * adjustments. + * + * Unlike the "system" library call from C and other languages, the + * os/exec package intentionally does not invoke the system shell and + * does not expand any glob patterns or handle other expansions, + * pipelines, or redirections typically done by shells. The package + * behaves more like C's "exec" family of functions. To expand glob + * patterns, either call the shell directly, taking care to escape any + * dangerous input, or use the [path/filepath] package's Glob function. + * To expand environment variables, use package os's ExpandEnv. + * + * Note that the examples in this package assume a Unix system. + * They may not run on Windows, and they do not run in the Go Playground + * used by golang.org and godoc.org. + * + * # Executables in the current directory + * + * The functions [Command] and [LookPath] look for a program + * in the directories listed in the current path, following the + * conventions of the host operating system. + * Operating systems have for decades included the current + * directory in this search, sometimes implicitly and sometimes + * configured explicitly that way by default. + * Modern practice is that including the current directory + * is usually unexpected and often leads to security problems. + * + * To avoid those security problems, as of Go 1.19, this package will not resolve a program + * using an implicit or explicit path entry relative to the current directory. + * That is, if you run [LookPath]("go"), it will not successfully return + * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. + * Instead, if the usual path algorithms would result in that answer, + * these functions return an error err satisfying [errors.Is](err, [ErrDot]). + * + * For example, consider these two program snippets: + * + * ``` + * path, err := exec.LookPath("prog") + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * ``` + * + * and + * + * ``` + * cmd := exec.Command("prog") + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * ``` + * + * These will not find and run ./prog or .\prog.exe, + * no matter how the current path is configured. + * + * Code that always wants to run a program from the current directory + * can be rewritten to say "./prog" instead of "prog". 
+ * + * Code that insists on including results from relative path entries + * can instead override the error using an errors.Is check: + * + * ``` + * path, err := exec.LookPath("prog") + * if errors.Is(err, exec.ErrDot) { + * err = nil + * } + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * ``` + * + * and + * + * ``` + * cmd := exec.Command("prog") + * if errors.Is(cmd.Err, exec.ErrDot) { + * cmd.Err = nil + * } + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * ``` + * + * Setting the environment variable GODEBUG=execerrdot=0 + * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19 + * behavior for programs that are unable to apply more targeted fixes. + * A future version of Go may remove support for this variable. + * + * Before adding such overrides, make sure you understand the + * security implications of doing so. + * See https://go.dev/blog/path-security for more information. + */ +namespace exec { + /** + * Cmd represents an external command being prepared or run. + * + * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] + * methods. + */ + interface Cmd { + /** + * Path is the path of the command to run. + * + * This is the only field that must be set to a non-zero + * value. If Path is relative, it is evaluated relative + * to Dir. + */ + path: string + /** + * Args holds command line arguments, including the command as Args[0]. + * If the Args field is empty or nil, Run uses {Path}. + * + * In typical use, both Path and Args are set by calling Command. + */ + args: Array + /** + * Env specifies the environment of the process. + * Each entry is of the form "key=value". + * If Env is nil, the new process uses the current process's + * environment. + * If Env contains duplicate environment keys, only the last + * value in the slice for each duplicate key is used. + * As a special case on Windows, SYSTEMROOT is always added if + * missing and not explicitly set to the empty string. + */ + env: Array + /** + * Dir specifies the working directory of the command. + * If Dir is the empty string, Run runs the command in the + * calling process's current directory. + */ + dir: string + /** + * Stdin specifies the process's standard input. + * + * If Stdin is nil, the process reads from the null device (os.DevNull). + * + * If Stdin is an *os.File, the process's standard input is connected + * directly to that file. + * + * Otherwise, during the execution of the command a separate + * goroutine reads from Stdin and delivers that data to the command + * over a pipe. In this case, Wait does not complete until the goroutine + * stops copying, either because it has reached the end of Stdin + * (EOF or a read error), or because writing to the pipe returned an error, + * or because a nonzero WaitDelay was set and expired. + */ + stdin: io.Reader + /** + * Stdout and Stderr specify the process's standard output and error. + * + * If either is nil, Run connects the corresponding file descriptor + * to the null device (os.DevNull). + * + * If either is an *os.File, the corresponding output from the process + * is connected directly to that file. + * + * Otherwise, during the execution of the command a separate goroutine + * reads from the process over a pipe and delivers that data to the + * corresponding Writer. In this case, Wait does not complete until the + * goroutine reaches EOF or encounters an error or a nonzero WaitDelay + * expires. 
+ * + * If Stdout and Stderr are the same writer, and have a type that can + * be compared with ==, at most one goroutine at a time will call Write. + */ + stdout: io.Writer + stderr: io.Writer + /** + * ExtraFiles specifies additional open files to be inherited by the + * new process. It does not include standard input, standard output, or + * standard error. If non-nil, entry i becomes file descriptor 3+i. + * + * ExtraFiles is not supported on Windows. + */ + extraFiles: Array<(os.File | undefined)> + /** + * SysProcAttr holds optional, operating system-specific attributes. + * Run passes it to os.StartProcess as the os.ProcAttr's Sys field. + */ + sysProcAttr?: syscall.SysProcAttr + /** + * Process is the underlying process, once started. + */ + process?: os.Process + /** + * ProcessState contains information about an exited process. + * If the process was started successfully, Wait or Run will + * populate its ProcessState when the command completes. + */ + processState?: os.ProcessState + err: Error // LookPath error, if any. + /** + * If Cancel is non-nil, the command must have been created with + * CommandContext and Cancel will be called when the command's + * Context is done. By default, CommandContext sets Cancel to + * call the Kill method on the command's Process. + * + * Typically a custom Cancel will send a signal to the command's + * Process, but it may instead take other actions to initiate cancellation, + * such as closing a stdin or stdout pipe or sending a shutdown request on a + * network socket. + * + * If the command exits with a success status after Cancel is + * called, and Cancel does not return an error equivalent to + * os.ErrProcessDone, then Wait and similar methods will return a non-nil + * error: either an error wrapping the one returned by Cancel, + * or the error from the Context. + * (If the command exits with a non-success status, or Cancel + * returns an error that wraps os.ErrProcessDone, Wait and similar methods + * continue to return the command's usual exit status.) + * + * If Cancel is set to nil, nothing will happen immediately when the command's + * Context is done, but a nonzero WaitDelay will still take effect. That may + * be useful, for example, to work around deadlocks in commands that do not + * support shutdown signals but are expected to always finish quickly. + * + * Cancel will not be called if Start returns a non-nil error. + */ + cancel: () => void + /** + * If WaitDelay is non-zero, it bounds the time spent waiting on two sources + * of unexpected delay in Wait: a child process that fails to exit after the + * associated Context is canceled, and a child process that exits but leaves + * its I/O pipes unclosed. + * + * The WaitDelay timer starts when either the associated Context is done or a + * call to Wait observes that the child process has exited, whichever occurs + * first. When the delay has elapsed, the command shuts down the child process + * and/or its I/O pipes. + * + * If the child process has failed to exit — perhaps because it ignored or + * failed to receive a shutdown signal from a Cancel function, or because no + * Cancel function was set — then it will be terminated using os.Process.Kill. + * + * Then, if the I/O pipes communicating with the child process are still open, + * those pipes are closed in order to unblock any goroutines currently blocked + * on Read or Write calls. 
+ * + * If pipes are closed due to WaitDelay, no Cancel call has occurred, + * and the command has otherwise exited with a successful status, Wait and + * similar methods will return ErrWaitDelay instead of nil. + * + * If WaitDelay is zero (the default), I/O pipes will be read until EOF, + * which might not occur until orphaned subprocesses of the command have + * also closed their descriptors for the pipes. + */ + waitDelay: time.Duration + } + interface Cmd { + /** + * String returns a human-readable description of c. + * It is intended only for debugging. + * In particular, it is not suitable for use as input to a shell. + * The output of String may vary across Go releases. + */ + string(): string + } + interface Cmd { + /** + * Run starts the specified command and waits for it to complete. + * + * The returned error is nil if the command runs, has no problems + * copying stdin, stdout, and stderr, and exits with a zero exit + * status. + * + * If the command starts but does not complete successfully, the error is of + * type [*ExitError]. Other error types may be returned for other situations. + * + * If the calling goroutine has locked the operating system thread + * with [runtime.LockOSThread] and modified any inheritable OS-level + * thread state (for example, Linux or Plan 9 name spaces), the new + * process will inherit the caller's thread state. + */ + run(): void + } + interface Cmd { + /** + * Start starts the specified command but does not wait for it to complete. + * + * If Start returns successfully, the c.Process field will be set. + * + * After a successful call to Start the [Cmd.Wait] method must be called in + * order to release associated system resources. + */ + start(): void + } + interface Cmd { + /** + * Wait waits for the command to exit and waits for any copying to + * stdin or copying from stdout or stderr to complete. + * + * The command must have been started by [Cmd.Start]. + * + * The returned error is nil if the command runs, has no problems + * copying stdin, stdout, and stderr, and exits with a zero exit + * status. + * + * If the command fails to run or doesn't complete successfully, the + * error is of type [*ExitError]. Other error types may be + * returned for I/O problems. + * + * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits + * for the respective I/O loop copying to or from the process to complete. + * + * Wait releases any resources associated with the [Cmd]. + */ + wait(): void + } + interface Cmd { + /** + * Output runs the command and returns its standard output. + * Any returned error will usually be of type [*ExitError]. + * If c.Stderr was nil, Output populates [ExitError.Stderr]. + */ + output(): string|Array + } + interface Cmd { + /** + * CombinedOutput runs the command and returns its combined standard + * output and standard error. + */ + combinedOutput(): string|Array + } + interface Cmd { + /** + * StdinPipe returns a pipe that will be connected to the command's + * standard input when the command starts. + * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. + * A caller need only call Close to force the pipe to close sooner. + * For example, if the command being run will not exit until standard input + * is closed, the caller must close the pipe. + */ + stdinPipe(): io.WriteCloser + } + interface Cmd { + /** + * StdoutPipe returns a pipe that will be connected to the command's + * standard output when the command starts. 
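+ *
+ * The important constraint is ordering: start the command, finish reading,
+ * then wait. A rough sketch (assuming `cmd` was constructed elsewhere and
+ * `readAll` stands in for whatever helper is used to drain the reader):
+ *
+ * ```
+ * const pipe = cmd.stdoutPipe()
+ * cmd.start()
+ * const out = readAll(pipe) // finish all reads before calling wait()
+ * cmd.wait()
+ * ```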
+ * + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. + * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. + * See the example for idiomatic usage. + */ + stdoutPipe(): io.ReadCloser + } + interface Cmd { + /** + * StderrPipe returns a pipe that will be connected to the command's + * standard error when the command starts. + * + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. + * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. + * See the StdoutPipe example for idiomatic usage. + */ + stderrPipe(): io.ReadCloser + } + interface Cmd { + /** + * Environ returns a copy of the environment in which the command would be run + * as it is currently configured. + */ + environ(): Array + } +} + +/** + * Package blob provides an easy and portable way to interact with blobs + * within a storage location. Subpackages contain driver implementations of + * blob for supported services. + * + * See https://gocloud.dev/howto/blob/ for a detailed how-to guide. + * + * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with + * functions in that package. + * + * # Errors + * + * The errors returned from this package can be inspected in several ways: + * + * The Code function from gocloud.dev/gcerrors will return an error code, also + * defined in that package, when invoked on an error. + * + * The Bucket.ErrorAs method can retrieve the driver error underlying the returned + * error. + * + * # OpenCensus Integration + * + * OpenCensus supports tracing and metric collection for multiple languages and + * backend providers. See https://opencensus.io. + * + * This API collects OpenCensus traces and metrics for the following methods: + * ``` + * - Attributes + * - Copy + * - Delete + * - ListPage + * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll + * are included because they call NewRangeReader.) + * - NewWriter, from creation until the call to Close. + * ``` + * + * All trace and metric names begin with the package import path. + * The traces add the method name. + * For example, "gocloud.dev/blob/Attributes". + * The metrics are "completed_calls", a count of completed method calls by driver, + * method and status (error code); and "latency", a distribution of method latency + * by driver and method. + * For example, "gocloud.dev/blob/latency". + * + * It also collects the following metrics: + * ``` + * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver. + * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver. + * ``` + * + * To enable trace collection in your application, see "Configure Exporter" at + * https://opencensus.io/quickstart/go/tracing. + * To enable metric collection in your application, see "Exporting stats" at + * https://opencensus.io/quickstart/go/metrics. + */ +namespace blob { + /** + * Reader reads bytes from a blob. + * It implements io.ReadSeekCloser, and must be closed after + * reads are finished. + */ + interface Reader { + } + interface Reader { + /** + * Read implements io.Reader (https://golang.org/pkg/io/#Reader). 
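+ *
+ * However the content is consumed, the reader must be closed once done. A
+ * minimal sketch (assuming `reader` was opened elsewhere; console.log is used
+ * only for illustration):
+ *
+ * ```
+ * try {
+ *     console.log(reader.contentType(), reader.size())
+ * } finally {
+ *     reader.close()
+ * }
+ * ```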
+ */ + read(p: string|Array): number + } + interface Reader { + /** + * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). + */ + seek(offset: number, whence: number): number + } + interface Reader { + /** + * Close implements io.Closer (https://golang.org/pkg/io/#Closer). + */ + close(): void + } + interface Reader { + /** + * ContentType returns the MIME type of the blob. + */ + contentType(): string + } + interface Reader { + /** + * ModTime returns the time the blob was last modified. + */ + modTime(): time.Time + } + interface Reader { + /** + * Size returns the size of the blob content in bytes. + */ + size(): number + } + interface Reader { + /** + * As converts i to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. + */ + as(i: { + }): boolean + } + interface Reader { + /** + * WriteTo reads from r and writes to w until there's no more data or + * an error occurs. + * The return value is the number of bytes written to w. + * + * It implements the io.WriterTo interface. + */ + writeTo(w: io.Writer): number + } + /** + * Attributes contains attributes about a blob. + */ + interface Attributes { + /** + * CacheControl specifies caching attributes that services may use + * when serving the blob. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + */ + cacheControl: string + /** + * ContentDisposition specifies whether the blob content is expected to be + * displayed inline or as an attachment. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + */ + contentDisposition: string + /** + * ContentEncoding specifies the encoding used for the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + */ + contentEncoding: string + /** + * ContentLanguage specifies the language used in the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language + */ + contentLanguage: string + /** + * ContentType is the MIME type of the blob. It will not be empty. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + */ + contentType: string + /** + * Metadata holds key/value pairs associated with the blob. + * Keys are guaranteed to be in lowercase, even if the backend service + * has case-sensitive keys (although note that Metadata written via + * this package will always be lowercased). If there are duplicate + * case-insensitive keys (e.g., "foo" and "FOO"), only one value + * will be kept, and it is undefined which one. + */ + metadata: _TygojaDict + /** + * CreateTime is the time the blob was created, if available. If not available, + * CreateTime will be the zero time. + */ + createTime: time.Time + /** + * ModTime is the time the blob was last modified. + */ + modTime: time.Time + /** + * Size is the size of the blob's content in bytes. + */ + size: number + /** + * MD5 is an MD5 hash of the blob contents or nil if not available. + */ + md5: string|Array + /** + * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. + */ + eTag: string + } + interface Attributes { + /** + * As converts i to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. 
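+ *
+ * Most callers only need the portable fields above rather than As, e.g.
+ * (a small sketch; `attrs` is assumed to hold an Attributes value):
+ *
+ * ```
+ * console.log(attrs.contentType, attrs.size, attrs.modTime)
+ * ```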
+ */ + as(i: { + }): boolean + } + /** + * ListObject represents a single blob returned from List. + */ + interface ListObject { + /** + * Key is the key for this blob. + */ + key: string + /** + * ModTime is the time the blob was last modified. + */ + modTime: time.Time + /** + * Size is the size of the blob's content in bytes. + */ + size: number + /** + * MD5 is an MD5 hash of the blob contents or nil if not available. + */ + md5: string|Array + /** + * IsDir indicates that this result represents a "directory" in the + * hierarchical namespace, ending in ListOptions.Delimiter. Key can be + * passed as ListOptions.Prefix to list items in the "directory". + * Fields other than Key and IsDir will not be set if IsDir is true. + */ + isDir: boolean + } + interface ListObject { + /** + * As converts i to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. + */ + as(i: { + }): boolean + } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { + /** + * DateTime represents a [time.Time] instance in UTC that is wrapped + * and serialized using the app default date layout. + */ + interface DateTime { + } + interface DateTime { + /** + * Time returns the internal [time.Time] instance. + */ + time(): time.Time + } + interface DateTime { + /** + * Add returns a new DateTime based on the current DateTime + the specified duration. + */ + add(duration: time.Duration): DateTime + } + interface DateTime { + /** + * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. + * + * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], + * the maximum (or minimum) duration will be returned. + */ + sub(u: DateTime): time.Duration + } + interface DateTime { + /** + * AddDate returns a new DateTime based on the current one + duration. + * + * It follows the same rules as [time.AddDate]. + */ + addDate(years: number, months: number, days: number): DateTime + } + interface DateTime { + /** + * After reports whether the current DateTime instance is after u. + */ + after(u: DateTime): boolean + } + interface DateTime { + /** + * Before reports whether the current DateTime instance is before u. + */ + before(u: DateTime): boolean + } + interface DateTime { + /** + * Compare compares the current DateTime instance with u. + * If the current instance is before u, it returns -1. + * If the current instance is after u, it returns +1. + * If they're the same, it returns 0. + */ + compare(u: DateTime): number + } + interface DateTime { + /** + * Equal reports whether the current DateTime and u represent the same time instant. + * Two DateTime can be equal even if they are in different locations. + * For example, 6:00 +0200 and 4:00 UTC are Equal. + */ + equal(u: DateTime): boolean + } + interface DateTime { + /** + * Unix returns the current DateTime as a Unix time, aka. + * the number of seconds elapsed since January 1, 1970 UTC. + */ + unix(): number + } + interface DateTime { + /** + * IsZero checks whether the current DateTime instance has zero time value. + */ + isZero(): boolean + } + interface DateTime { + /** + * String serializes the current DateTime instance into a formatted + * UTC date string. + * + * The zero value is serialized to an empty string. 
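+ *
+ * A small illustration (assuming `dt` holds a non-empty DateTime value, e.g.
+ * one read from a record field):
+ *
+ * ```
+ * if (!dt.isZero()) {
+ *     console.log(dt.string()) // e.g. "2025-01-02 03:04:05.000Z"
+ * }
+ * ```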
+ */ + string(): string + } + interface DateTime { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface DateTime { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface DateTime { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface DateTime { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current DateTime instance. + */ + scan(value: any): void + } +} + +namespace router { + // @ts-ignore + import validation = ozzo_validation + /** + * ApiError defines the struct for a basic api error response. + */ + interface ApiError { + data: _TygojaDict + message: string + status: number + } + interface ApiError { + /** + * Error makes it compatible with the `error` interface. + */ + error(): string + } + interface ApiError { + /** + * RawData returns the unformatted error data (could be an internal error, text, etc.) + */ + rawData(): any + } + interface ApiError { + /** + * Is reports whether the current ApiError wraps the target. + */ + is(target: Error): boolean + } + /** + * Router defines a thin wrapper around the standard Go [http.ServeMux] by + * adding support for routing sub-groups, middlewares and other common utils. + * + * Example: + * + * ``` + * r := NewRouter[*MyEvent](eventFactory) + * + * // middlewares + * r.BindFunc(m1, m2) + * + * // routes + * r.GET("/test", handler1) + * + * // sub-routers/groups + * api := r.Group("/api") + * api.GET("/admins", handler2) + * + * // generate a http.ServeMux instance based on the router configurations + * mux, _ := r.BuildMux() + * + * http.ListenAndServe("localhost:8090", mux) + * ``` + */ + type _subumReX = RouterGroup + interface Router extends _subumReX { + } + interface Router { + /** + * BuildMux constructs a new mux [http.Handler] instance from the current router configurations. + */ + buildMux(): http.Handler + } +} + +/** + * Package core is the backbone of PocketBase. + * + * It defines the main PocketBase App interface and its base implementation. + */ +namespace core { + /** + * App defines the main PocketBase app interface. + * + * Note that the interface is not intended to be implemented manually by users + * and instead they should use core.BaseApp (either directly or as embedded field in a custom struct). + * + * This interface exists to make testing easier and to allow users to + * create common and pluggable helpers and methods that doesn't rely + * on a specific wrapped app struct (hence the large interface size). + */ + interface App { + [key:string]: any; + /** + * UnsafeWithoutHooks returns a shallow copy of the current app WITHOUT any registered hooks. + * + * NB! Note that using the returned app instance may cause data integrity errors + * since the Record validations and data normalizations (including files uploads) + * rely on the app hooks to work. + */ + unsafeWithoutHooks(): App + /** + * Logger returns the default app logger. + * + * If the application is not bootstrapped yet, fallbacks to slog.Default(). + */ + logger(): (slog.Logger) + /** + * IsBootstrapped checks if the application was initialized + * (aka. whether Bootstrap() was called). + */ + isBootstrapped(): boolean + /** + * IsTransactional checks if the current app instance is part of a transaction. + */ + isTransactional(): boolean + /** + * Bootstrap initializes the application + * (aka. 
create data dir, open db connections, load settings, etc.). + * + * It will call ResetBootstrapState() if the application was already bootstrapped. + */ + bootstrap(): void + /** + * ResetBootstrapState releases the initialized core app resources + * (closing db connections, stopping cron ticker, etc.). + */ + resetBootstrapState(): void + /** + * DataDir returns the app data directory path. + */ + dataDir(): string + /** + * EncryptionEnv returns the name of the app secret env key + * (currently used primarily for optional settings encryption but this may change in the future). + */ + encryptionEnv(): string + /** + * IsDev returns whether the app is in dev mode. + * + * When enabled logs, executed sql statements, etc. are printed to the stderr. + */ + isDev(): boolean + /** + * Settings returns the loaded app settings. + */ + settings(): (Settings) + /** + * Store returns the app runtime store. + */ + store(): (store.Store) + /** + * Cron returns the app cron instance. + */ + cron(): (cron.Cron) + /** + * SubscriptionsBroker returns the app realtime subscriptions broker instance. + */ + subscriptionsBroker(): (subscriptions.Broker) + /** + * NewMailClient creates and returns a new SMTP or Sendmail client + * based on the current app settings. + */ + newMailClient(): mailer.Mailer + /** + * NewFilesystem creates a new local or S3 filesystem instance + * for managing regular app files (ex. record uploads) + * based on the current app settings. + * + * NB! Make sure to call Close() on the returned result + * after you are done working with it. + */ + newFilesystem(): (filesystem.System) + /** + * NewFilesystem creates a new local or S3 filesystem instance + * for managing app backups based on the current app settings. + * + * NB! Make sure to call Close() on the returned result + * after you are done working with it. + */ + newBackupsFilesystem(): (filesystem.System) + /** + * ReloadSettings reinitializes and reloads the stored application settings. + */ + reloadSettings(): void + /** + * CreateBackup creates a new backup of the current app pb_data directory. + * + * Backups can be stored on S3 if it is configured in app.Settings().Backups. + * + * Please refer to the godoc of the specific CoreApp implementation + * for details on the backup procedures. + */ + createBackup(ctx: context.Context, name: string): void + /** + * RestoreBackup restores the backup with the specified name and restarts + * the current running application process. + * + * The safely perform the restore it is recommended to have free disk space + * for at least 2x the size of the restored pb_data backup. + * + * Please refer to the godoc of the specific CoreApp implementation + * for details on the restore procedures. + * + * NB! This feature is experimental and currently is expected to work only on UNIX based systems. + */ + restoreBackup(ctx: context.Context, name: string): void + /** + * Restart restarts (aka. replaces) the current running application process. + * + * NB! It relies on execve which is supported only on UNIX based systems. + */ + restart(): void + /** + * RunSystemMigrations applies all new migrations registered in the [core.SystemMigrations] list. + */ + runSystemMigrations(): void + /** + * RunAppMigrations applies all new migrations registered in the [CoreAppMigrations] list. + */ + runAppMigrations(): void + /** + * RunAllMigrations applies all system and app migrations + * (aka. from both [core.SystemMigrations] and [CoreAppMigrations]). 
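+ *
+ * In other words (a hedged sketch; `app` is assumed to be the application
+ * instance and error handling is omitted):
+ *
+ * ```
+ * // roughly the same as running both of the following:
+ * app.runSystemMigrations()
+ * app.runAppMigrations()
+ * ```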
+ */
+ runAllMigrations(): void
+ /**
+ * DB returns the default app data db instance (pb_data/data.db).
+ */
+ db(): dbx.Builder
+ /**
+ * NonconcurrentDB returns the nonconcurrent app data db instance (pb_data/data.db).
+ *
+ * The returned db instance is limited only to a single open connection,
+ * meaning that it can process only 1 db operation at a time (other operations will be queued up).
+ *
+ * This method is used mainly internally and in the tests to execute write
+ * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors.
+ *
+ * For the majority of cases you would want to use the regular DB() method
+ * since it allows concurrent db read operations.
+ *
+ * In a transaction the DB() and NonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ nonconcurrentDB(): dbx.Builder
+ /**
+ * AuxDB returns the default app auxiliary db instance (pb_data/aux.db).
+ */
+ auxDB(): dbx.Builder
+ /**
+ * AuxNonconcurrentDB returns the nonconcurrent app auxiliary db instance (pb_data/aux.db).
+ *
+ * The returned db instance is limited only to a single open connection,
+ * meaning that it can process only 1 db operation at a time (other operations will be queued up).
+ *
+ * This method is used mainly internally and in the tests to execute write
+ * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors.
+ *
+ * For the majority of cases you would want to use the regular DB() method
+ * since it allows concurrent db read operations.
+ *
+ * In a transaction the AuxDB() and AuxNonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ auxNonconcurrentDB(): dbx.Builder
+ /**
+ * HasTable checks if a table (or view) with the provided name exists (case insensitive).
+ */
+ hasTable(tableName: string): boolean
+ /**
+ * TableColumns returns all column names of a single table by its name.
+ */
+ tableColumns(tableName: string): Array<string>
+ /**
+ * TableInfo returns the "table_info" pragma result for the specified table.
+ */
+ tableInfo(tableName: string): Array<(TableInfoRow | undefined)>
+ /**
+ * TableIndexes returns a name grouped map with all non empty indexes of the specified table.
+ *
+ * Note: This method doesn't return an error on a nonexisting table.
+ */
+ tableIndexes(tableName: string): _TygojaDict
+ /**
+ * DeleteTable drops the specified table.
+ *
+ * This method is a no-op if a table with the provided name doesn't exist.
+ *
+ * NB! Be aware that this method is vulnerable to SQL injection and the
+ * "tableName" argument must come only from trusted input!
+ */
+ deleteTable(tableName: string): void
+ /**
+ * DeleteView drops the specified view.
+ *
+ * This method is a no-op if a view with the provided name doesn't exist.
+ *
+ * NB! Be aware that this method is vulnerable to SQL injection and the
+ * "name" argument must come only from trusted input!
+ */
+ deleteView(name: string): void
+ /**
+ * SaveView creates (or updates an already existing) persistent SQL view.
+ *
+ * NB! Be aware that this method is vulnerable to SQL injection and the
+ * "selectQuery" argument must come only from trusted input!
+ */
+ saveView(name: string, selectQuery: string): void
+ /**
+ * CreateViewFields creates a new FieldsList from the provided select query.
+ *
+ * There are some caveats:
+ * - The select query must have an "id" column.
+ * - Wildcard ("*") columns are not supported to avoid accidentally leaking sensitive data.
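+ *
+ * For example (a hedged sketch; `app` is assumed and the "posts" table is
+ * hypothetical):
+ *
+ * ```
+ * const fields = app.createViewFields("SELECT posts.id, posts.title FROM posts")
+ * ```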
+ */ + createViewFields(selectQuery: string): FieldsList + /** + * FindRecordByViewFile returns the original Record of the provided view collection file. + */ + findRecordByViewFile(viewCollectionModelOrIdentifier: any, fileFieldName: string, filename: string): (Record) + /** + * Vacuum executes VACUUM on the current app.DB() instance + * in order to reclaim unused data db disk space. + */ + vacuum(): void + /** + * AuxVacuum executes VACUUM on the current app.AuxDB() instance + * in order to reclaim unused auxiliary db disk space. + */ + auxVacuum(): void + /** + * ModelQuery creates a new preconfigured select app.DB() query with preset + * SELECT, FROM and other common fields based on the provided model. + */ + modelQuery(model: Model): (dbx.SelectQuery) + /** + * AuxModelQuery creates a new preconfigured select app.AuxDB() query with preset + * SELECT, FROM and other common fields based on the provided model. + */ + auxModelQuery(model: Model): (dbx.SelectQuery) + /** + * Delete deletes the specified model from the regular app database. + */ + delete(model: Model): void + /** + * Delete deletes the specified model from the regular app database + * (the context could be used to limit the query execution). + */ + deleteWithContext(ctx: context.Context, model: Model): void + /** + * AuxDelete deletes the specified model from the auxiliary database. + */ + auxDelete(model: Model): void + /** + * AuxDeleteWithContext deletes the specified model from the auxiliary database + * (the context could be used to limit the query execution). + */ + auxDeleteWithContext(ctx: context.Context, model: Model): void + /** + * Save validates and saves the specified model into the regular app database. + * + * If you don't want to run validations, use [App.SaveNoValidate()]. + */ + save(model: Model): void + /** + * SaveWithContext is the same as [App.Save()] but allows specifying a context to limit the db execution. + * + * If you don't want to run validations, use [App.SaveNoValidateWithContext()]. + */ + saveWithContext(ctx: context.Context, model: Model): void + /** + * SaveNoValidate saves the specified model into the regular app database without performing validations. + * + * If you want to also run validations before persisting, use [App.Save()]. + */ + saveNoValidate(model: Model): void + /** + * SaveNoValidateWithContext is the same as [App.SaveNoValidate()] + * but allows specifying a context to limit the db execution. + * + * If you want to also run validations before persisting, use [App.SaveWithContext()]. + */ + saveNoValidateWithContext(ctx: context.Context, model: Model): void + /** + * AuxSave validates and saves the specified model into the auxiliary app database. + * + * If you don't want to run validations, use [App.AuxSaveNoValidate()]. + */ + auxSave(model: Model): void + /** + * AuxSaveWithContext is the same as [App.AuxSave()] but allows specifying a context to limit the db execution. + * + * If you don't want to run validations, use [App.AuxSaveNoValidateWithContext()]. + */ + auxSaveWithContext(ctx: context.Context, model: Model): void + /** + * AuxSaveNoValidate saves the specified model into the auxiliary app database without performing validations. + * + * If you want to also run validations before persisting, use [App.AuxSave()]. + */ + auxSaveNoValidate(model: Model): void + /** + * AuxSaveNoValidateWithContext is the same as [App.AuxSaveNoValidate()] + * but allows specifying a context to limit the db execution. 
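+ *
+ * As a general illustration of the save helpers (a rough sketch; `app`,
+ * `recordA` and `recordB` are assumed, and errors are expected to be thrown
+ * by the bindings):
+ *
+ * ```
+ * app.runInTransaction((txApp) => {
+ *     txApp.save(recordA)           // with validations
+ *     txApp.saveNoValidate(recordB) // skips validations
+ * })
+ * ```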
+ * + * If you want to also run validations before persisting, use [App.AuxSaveWithContext()]. + */ + auxSaveNoValidateWithContext(ctx: context.Context, model: Model): void + /** + * Validate triggers the OnModelValidate hook for the specified model. + */ + validate(model: Model): void + /** + * ValidateWithContext is the same as Validate but allows specifying the ModelEvent context. + */ + validateWithContext(ctx: context.Context, model: Model): void + /** + * RunInTransaction wraps fn into a transaction for the regular app database. + * + * It is safe to nest RunInTransaction calls as long as you use the callback's txApp. + */ + runInTransaction(fn: (txApp: App) => void): void + /** + * AuxRunInTransaction wraps fn into a transaction for the auxiliary app database. + * + * It is safe to nest RunInTransaction calls as long as you use the callback's txApp. + */ + auxRunInTransaction(fn: (txApp: App) => void): void + /** + * LogQuery returns a new Log select query. + */ + logQuery(): (dbx.SelectQuery) + /** + * FindLogById finds a single Log entry by its id. + */ + findLogById(id: string): (Log) + /** + * LogsStatsItem defines the total number of logs for a specific time period. + */ + logsStats(expr: dbx.Expression): Array<(LogsStatsItem | undefined)> + /** + * DeleteOldLogs delete all requests that are created before createdBefore. + */ + deleteOldLogs(createdBefore: time.Time): void + /** + * CollectionQuery returns a new Collection select query. + */ + collectionQuery(): (dbx.SelectQuery) + /** + * FindCollections finds all collections by the given type(s). + * + * If collectionTypes is not set, it returns all collections. + * + * Example: + * + * ``` + * app.FindAllCollections() // all collections + * app.FindAllCollections("auth", "view") // only auth and view collections + * ``` + */ + findAllCollections(...collectionTypes: string[]): Array<(Collection | undefined)> + /** + * ReloadCachedCollections fetches all collections and caches them into the app store. + */ + reloadCachedCollections(): void + /** + * FindCollectionByNameOrId finds a single collection by its name (case insensitive) or id.s + */ + findCollectionByNameOrId(nameOrId: string): (Collection) + /** + * FindCachedCollectionByNameOrId is similar to [App.FindCollectionByNameOrId] + * but retrieves the Collection from the app cache instead of making a db call. + * + * NB! This method is suitable for read-only Collection operations. + * + * Returns [sql.ErrNoRows] if no Collection is found for consistency + * with the [App.FindCollectionByNameOrId] method. + * + * If you plan making changes to the returned Collection model, + * use [App.FindCollectionByNameOrId] instead. + * + * Caveats: + * + * ``` + * - The returned Collection should be used only for read-only operations. + * Avoid directly modifying the returned cached Collection as it will affect + * the global cached value even if you don't persist the changes in the database! + * - If you are updating a Collection in a transaction and then call this method before commit, + * it'll return the cached Collection state and not the one from the uncommitted transaction. + * - The cache is automatically updated on collections db change (create/update/delete). + * To manually reload the cache you can call [App.ReloadCachedCollections()] + * ``` + */ + findCachedCollectionByNameOrId(nameOrId: string): (Collection) + /** + * IsCollectionNameUnique checks that there is no existing collection + * with the provided name (case insensitive!). 
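+ *
+ * For example (a minimal sketch; `app` is assumed):
+ *
+ * ```
+ * if (!app.isCollectionNameUnique("posts")) {
+ *     // a collection named "posts" (in any letter casing) already exists
+ * }
+ * ```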
+ * + * Note: case insensitive check because the name is used also as + * table name for the records. + */ + isCollectionNameUnique(name: string, ...excludeIds: string[]): boolean + /** + * FindCollectionReferences returns information for all relation + * fields referencing the provided collection. + * + * If the provided collection has reference to itself then it will be + * also included in the result. To exclude it, pass the collection id + * as the excludeIds argument. + */ + findCollectionReferences(collection: Collection, ...excludeIds: string[]): _TygojaDict + /** + * TruncateCollection deletes all records associated with the provided collection. + * + * The truncate operation is executed in a single transaction, + * aka. either everything is deleted or none. + * + * Note that this method will also trigger the records related + * cascade and file delete actions. + */ + truncateCollection(collection: Collection): void + /** + * ImportCollections imports the provided collections data in a single transaction. + * + * For existing matching collections, the imported data is unmarshaled on top of the existing model. + * + * NB! If deleteMissing is true, ALL NON-SYSTEM COLLECTIONS AND SCHEMA FIELDS, + * that are not present in the imported configuration, WILL BE DELETED + * (this includes their related records data). + */ + importCollections(toImport: Array<_TygojaDict>, deleteMissing: boolean): void + /** + * ImportCollectionsByMarshaledJSON is the same as [ImportCollections] + * but accept marshaled json array as import data (usually used for the autogenerated snapshots). + */ + importCollectionsByMarshaledJSON(rawSliceOfMaps: string|Array, deleteMissing: boolean): void + /** + * SyncRecordTableSchema compares the two provided collections + * and applies the necessary related record table changes. + * + * If oldCollection is null, then only newCollection is used to create the record table. + * + * This method is automatically invoked as part of a collection create/update/delete operation. + */ + syncRecordTableSchema(newCollection: Collection, oldCollection: Collection): void + /** + * FindAllExternalAuthsByRecord returns all ExternalAuth models + * linked to the provided auth record. + */ + findAllExternalAuthsByRecord(authRecord: Record): Array<(ExternalAuth | undefined)> + /** + * FindAllExternalAuthsByCollection returns all ExternalAuth models + * linked to the provided auth collection. + */ + findAllExternalAuthsByCollection(collection: Collection): Array<(ExternalAuth | undefined)> + /** + * FindFirstExternalAuthByExpr returns the first available (the most recent created) + * ExternalAuth model that satisfies the non-nil expression. + */ + findFirstExternalAuthByExpr(expr: dbx.Expression): (ExternalAuth) + /** + * FindAllMFAsByRecord returns all MFA models linked to the provided auth record. + */ + findAllMFAsByRecord(authRecord: Record): Array<(MFA | undefined)> + /** + * FindAllMFAsByCollection returns all MFA models linked to the provided collection. + */ + findAllMFAsByCollection(collection: Collection): Array<(MFA | undefined)> + /** + * FindMFAById returns a single MFA model by its id. + */ + findMFAById(id: string): (MFA) + /** + * DeleteAllMFAsByRecord deletes all MFA models associated with the provided record. + * + * Returns a combined error with the failed deletes. + */ + deleteAllMFAsByRecord(authRecord: Record): void + /** + * DeleteExpiredMFAs deletes the expired MFAs for all auth collections. 
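+ *
+ * A minimal manual invocation sketch (error handling via the standard log package):
+ *
+ * ```
+ * if err := app.DeleteExpiredMFAs(); err != nil {
+ *     log.Println("failed to delete the expired MFAs:", err)
+ * }
+ * ```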
+ */ + deleteExpiredMFAs(): void + /** + * FindAllOTPsByRecord returns all OTP models linked to the provided auth record. + */ + findAllOTPsByRecord(authRecord: Record): Array<(OTP | undefined)> + /** + * FindAllOTPsByCollection returns all OTP models linked to the provided collection. + */ + findAllOTPsByCollection(collection: Collection): Array<(OTP | undefined)> + /** + * FindOTPById returns a single OTP model by its id. + */ + findOTPById(id: string): (OTP) + /** + * DeleteAllOTPsByRecord deletes all OTP models associated with the provided record. + * + * Returns a combined error with the failed deletes. + */ + deleteAllOTPsByRecord(authRecord: Record): void + /** + * DeleteExpiredOTPs deletes the expired OTPs for all auth collections. + */ + deleteExpiredOTPs(): void + /** + * FindAllAuthOriginsByRecord returns all AuthOrigin models linked to the provided auth record (in DESC order). + */ + findAllAuthOriginsByRecord(authRecord: Record): Array<(AuthOrigin | undefined)> + /** + * FindAllAuthOriginsByCollection returns all AuthOrigin models linked to the provided collection (in DESC order). + */ + findAllAuthOriginsByCollection(collection: Collection): Array<(AuthOrigin | undefined)> + /** + * FindAuthOriginById returns a single AuthOrigin model by its id. + */ + findAuthOriginById(id: string): (AuthOrigin) + /** + * FindAuthOriginByRecordAndFingerprint returns a single AuthOrigin model + * by its authRecord relation and fingerprint. + */ + findAuthOriginByRecordAndFingerprint(authRecord: Record, fingerprint: string): (AuthOrigin) + /** + * DeleteAllAuthOriginsByRecord deletes all AuthOrigin models associated with the provided record. + * + * Returns a combined error with the failed deletes. + */ + deleteAllAuthOriginsByRecord(authRecord: Record): void + /** + * RecordQuery returns a new Record select query from a collection model, id or name. + * + * In case a collection id or name is provided and that collection doesn't + * actually exists, the generated query will be created with a cancelled context + * and will fail once an executor (Row(), One(), All(), etc.) is called. + */ + recordQuery(collectionModelOrIdentifier: any): (dbx.SelectQuery) + /** + * FindRecordById finds the Record model by its id. + */ + findRecordById(collectionModelOrIdentifier: any, recordId: string, ...optFilters: ((q: dbx.SelectQuery) => void)[]): (Record) + /** + * FindRecordsByIds finds all records by the specified ids. + * If no records are found, returns an empty slice. + */ + findRecordsByIds(collectionModelOrIdentifier: any, recordIds: Array, ...optFilters: ((q: dbx.SelectQuery) => void)[]): Array<(Record | undefined)> + /** + * FindAllRecords finds all records matching specified db expressions. + * + * Returns all collection records if no expression is provided. + * + * Returns an empty slice if no records are found. + * + * Example: + * + * ``` + * // no extra expressions + * app.FindAllRecords("example") + * + * // with extra expressions + * expr1 := dbx.HashExp{"email": "test@example.com"} + * expr2 := dbx.NewExp("LOWER(username) = {:username}", dbx.Params{"username": "test"}) + * app.FindAllRecords("example", expr1, expr2) + * ``` + */ + findAllRecords(collectionModelOrIdentifier: any, ...exprs: dbx.Expression[]): Array<(Record | undefined)> + /** + * FindFirstRecordByData returns the first found record matching + * the provided key-value pair. 
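+ *
+ * A minimal usage sketch (the collection, field and value are illustrative assumptions):
+ *
+ * ```
+ * user, err := app.FindFirstRecordByData("users", "email", "test@example.com")
+ * if err != nil {
+ *     log.Fatal(err)
+ * }
+ * ```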
+ */ + findFirstRecordByData(collectionModelOrIdentifier: any, key: string, value: any): (Record) + /** + * FindRecordsByFilter returns limit number of records matching the + * provided string filter. + * + * NB! Use the last "params" argument to bind untrusted user variables! + * + * The filter argument is optional and can be empty string to target + * all available records. + * + * The sort argument is optional and can be empty string OR the same format + * used in the web APIs, ex. "-created,title". + * + * If the limit argument is <= 0, no limit is applied to the query and + * all matching records are returned. + * + * Returns an empty slice if no records are found. + * + * Example: + * + * ``` + * app.FindRecordsByFilter( + * "posts", + * "title ~ {:title} && visible = {:visible}", + * "-created", + * 10, + * 0, + * dbx.Params{"title": "lorem ipsum", "visible": true} + * ) + * ``` + */ + findRecordsByFilter(collectionModelOrIdentifier: any, filter: string, sort: string, limit: number, offset: number, ...params: dbx.Params[]): Array<(Record | undefined)> + /** + * FindFirstRecordByFilter returns the first available record matching the provided filter (if any). + * + * NB! Use the last params argument to bind untrusted user variables! + * + * Returns sql.ErrNoRows if no record is found. + * + * Example: + * + * ``` + * app.FindFirstRecordByFilter("posts", "") + * app.FindFirstRecordByFilter("posts", "slug={:slug} && status='public'", dbx.Params{"slug": "test"}) + * ``` + */ + findFirstRecordByFilter(collectionModelOrIdentifier: any, filter: string, ...params: dbx.Params[]): (Record) + /** + * CountRecords returns the total number of records in a collection. + */ + countRecords(collectionModelOrIdentifier: any, ...exprs: dbx.Expression[]): number + /** + * FindAuthRecordByToken finds the auth record associated with the provided JWT + * (auth, file, verifyEmail, changeEmail, passwordReset types). + * + * Optionally specify a list of validTypes to check tokens only from those types. + * + * Returns an error if the JWT is invalid, expired or not associated to an auth collection record. + */ + findAuthRecordByToken(token: string, ...validTypes: string[]): (Record) + /** + * FindAuthRecordByEmail finds the auth record associated with the provided email. + * + * Returns an error if it is not an auth collection or the record is not found. + */ + findAuthRecordByEmail(collectionModelOrIdentifier: any, email: string): (Record) + /** + * CanAccessRecord checks if a record is allowed to be accessed by the + * specified requestInfo and accessRule. + * + * Rule and db checks are ignored in case requestInfo.AuthRecord is a superuser. + * + * The returned error indicate that something unexpected happened during + * the check (eg. invalid rule or db query error). + * + * The method always return false on invalid rule or db query error. + * + * Example: + * + * ``` + * requestInfo, _ := e.RequestInfo() + * record, _ := app.FindRecordById("example", "RECORD_ID") + * rule := types.Pointer("@request.auth.id != '' || status = 'public'") + * // ... or use one of the record collection's rule, eg. record.Collection().ViewRule + * + * if ok, _ := app.CanAccessRecord(record, requestInfo, rule); ok { ... } + * ``` + */ + canAccessRecord(record: Record, requestInfo: RequestInfo, accessRule: string): boolean + /** + * ExpandRecord expands the relations of a single Record model. + * + * If optFetchFunc is not set, then a default function will be used + * that returns all relation records. 
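+ *
+ * A minimal usage sketch (the "author" relation field name is an illustrative assumption):
+ *
+ * ```
+ * expandErrs := app.ExpandRecord(record, []string{"author"}, nil)
+ * if len(expandErrs) > 0 {
+ *     log.Println("failed to expand some relations:", expandErrs)
+ * }
+ * ```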
+ *
+ * Returns a map with the failed expand parameters and their errors.
+ */
+ expandRecord(record: Record, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ /**
+ * ExpandRecords expands the relations of the provided Record models list.
+ *
+ * If optFetchFunc is not set, then a default function will be used
+ * that returns all relation records.
+ *
+ * Returns a map with the failed expand parameters and their errors.
+ */
+ expandRecords(records: Array<(Record | undefined)>, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ /**
+ * OnBootstrap hook is triggered on initializing the main application
+ * resources (db, app settings, etc).
+ */
+ onBootstrap(): (hook.Hook)
+ /**
+ * OnServe hook is triggered when the app web server is started
+ * (after starting the tcp listener but before initializing the blocking serve task),
+ * allowing you to adjust its options and attach new routes or middlewares.
+ */
+ onServe(): (hook.Hook)
+ /**
+ * OnTerminate hook is triggered when the app is in the process
+ * of being terminated (ex. on SIGTERM signal).
+ */
+ onTerminate(): (hook.Hook)
+ /**
+ * OnBackupCreate hook is triggered on each [App.CreateBackup] call.
+ */
+ onBackupCreate(): (hook.Hook)
+ /**
+ * OnBackupRestore hook is triggered before app backup restore (aka. [App.RestoreBackup] call).
+ *
+ * Note that by default on success the application is restarted and the after state of the hook is ignored.
+ */
+ onBackupRestore(): (hook.Hook)
+ /**
+ * OnModelValidate is triggered every time when a model is being validated
+ * (e.g. triggered by App.Validate() or App.Save()).
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelValidate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelCreate is triggered every time when a new model is being created
+ * (e.g. triggered by App.Save()).
+ *
+ * Operations BEFORE the e.Next() execute before the model validation
+ * and the INSERT DB statement.
+ *
+ * Operations AFTER the e.Next() execute after the model validation
+ * and the INSERT DB statement.
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted events, you can
+ * bind to [OnModelAfterCreateSuccess] or [OnModelAfterCreateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelCreate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelCreateExecute is triggered after successful Model validation
+ * and right before the model INSERT DB statement execution.
+ *
+ * Usually it is triggered as part of the App.Save() in the following firing order:
+ * OnModelCreate {
+ * ```
+ * -> OnModelValidate (skipped with App.SaveNoValidate())
+ * -> OnModelCreateExecute
+ * ```
+ * }
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted events,
+ * you can bind to [OnModelAfterCreateSuccess] or [OnModelAfterCreateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelCreateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterCreateSuccess is triggered after each successful
+ * Model DB create persistence.
+ *
+ * Note that when a Model is persisted as part of a transaction,
+ * this hook is triggered AFTER the transaction has been committed.
+ * This hook is NOT triggered in case the transaction rollbacks
+ * (aka. when the model wasn't persisted).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterCreateError is triggered after each failed
+ * Model DB create persistence.
+ * Note that when a Model is persisted as part of a transaction,
+ * this hook is triggered in one of the following cases:
+ * ```
+ * - immediately after App.Save() failure
+ * - on transaction rollback
+ * ```
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelUpdate is triggered every time when a model is being updated
+ * (e.g. triggered by App.Save()).
+ *
+ * Operations BEFORE the e.Next() execute before the model validation
+ * and the UPDATE DB statement.
+ *
+ * Operations AFTER the e.Next() execute after the model validation
+ * and the UPDATE DB statement.
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted events, you can
+ * bind to [OnModelAfterUpdateSuccess] or [OnModelAfterUpdateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelUpdate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelUpdateExecute is triggered after successful Model validation
+ * and right before the model UPDATE DB statement execution.
+ *
+ * Usually it is triggered as part of the App.Save() in the following firing order:
+ * OnModelUpdate {
+ * ```
+ * -> OnModelValidate (skipped with App.SaveNoValidate())
+ * -> OnModelUpdateExecute
+ * ```
+ * }
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted events,
+ * you can bind to [OnModelAfterUpdateSuccess] or [OnModelAfterUpdateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterUpdateSuccess is triggered after each successful
+ * Model DB update persistence.
+ *
+ * Note that when a Model is persisted as part of a transaction,
+ * this hook is triggered AFTER the transaction has been committed.
+ * This hook is NOT triggered in case the transaction rollbacks
+ * (aka. when the model changes weren't persisted).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterUpdateError is triggered after each failed
+ * Model DB update persistence.
+ *
+ * Note that when a Model is persisted as part of a transaction,
+ * this hook is triggered in one of the following cases:
+ * ```
+ * - immediately after App.Save() failure
+ * - on transaction rollback
+ * ```
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelDelete is triggered every time when a model is being deleted
+ * (e.g. triggered by App.Delete()).
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is deleted from the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted deleted events, you can
+ * bind to [OnModelAfterDeleteSuccess] or [OnModelAfterDeleteError] hooks.
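+ *
+ * A minimal binding sketch (the "posts" tag is an illustrative assumption):
+ *
+ * ```
+ * app.OnModelDelete("posts").BindFunc(func(e *core.ModelEvent) error {
+ *     // place for extra checks before the DELETE statement is executed
+ *     return e.Next()
+ * })
+ * ```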
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelDelete(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelDeleteExecute is triggered right before the model
+ * DELETE DB statement execution.
+ *
+ * Usually it is triggered as part of the App.Delete() in the following firing order:
+ * OnModelDelete {
+ * ```
+ * -> (internal delete checks)
+ * -> OnModelDeleteExecute
+ * ```
+ * }
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is deleted from the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted deleted events, you can
+ * bind to [OnModelAfterDeleteSuccess] or [OnModelAfterDeleteError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterDeleteSuccess is triggered after each successful
+ * Model DB delete persistence.
+ *
+ * Note that when a Model is deleted as part of a transaction,
+ * this hook is triggered AFTER the transaction has been committed.
+ * This hook is NOT triggered in case the transaction rollbacks
+ * (aka. when the model delete wasn't persisted).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterDeleteError is triggered after each failed
+ * Model DB delete persistence.
+ *
+ * Note that when a Model is deleted as part of a transaction,
+ * this hook is triggered in one of the following cases:
+ * ```
+ * - immediately after App.Delete() failure
+ * - on transaction rollback
+ * ```
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordEnrich is triggered every time when a record is enriched
+ * (during realtime message serialization, as part of the builtin Record
+ * responses, or when [apis.EnrichRecord] is invoked).
+ *
+ * It could be used to redact/hide or add computed temp
+ * Record model props only for the specific request info. For example:
+ *
+ * app.OnRecordEnrich("posts").BindFunc(func(e *core.RecordEnrichEvent) error {
+ * ```
+ * // hide one or more fields
+ * e.Record.Hide("role")
+ *
+ * // add new custom field for registered users
+ * if e.RequestInfo.Auth != nil && e.RequestInfo.Auth.Collection().Name == "users" {
+ *     e.Record.WithCustomData(true) // for security requires explicitly allowing it
+ *     e.Record.Set("computedScore", e.Record.GetInt("score") * e.RequestInfo.Auth.GetInt("baseScore"))
+ * }
+ *
+ * return e.Next()
+ * ```
+ * })
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordEnrich(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordValidate is a proxy Record model hook for [OnModelValidate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordValidate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordCreate is a proxy Record model hook for [OnModelCreate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordCreate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordCreateExecute is a proxy Record model hook for [OnModelCreateExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordCreateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterCreateSuccess is a proxy Record model hook for [OnModelAfterCreateSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterCreateError is a proxy Record model hook for [OnModelAfterCreateError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordUpdate is a proxy Record model hook for [OnModelUpdate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordUpdate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordUpdateExecute is a proxy Record model hook for [OnModelUpdateExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterUpdateSuccess is a proxy Record model hook for [OnModelAfterUpdateSuccess].
+ * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordAfterUpdateError is a proxy Record model hook for [OnModelAfterUpdateError]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordAfterUpdateError(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordDelete is a proxy Record model hook for [OnModelDelete]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordDelete(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordDeleteExecute is a proxy Record model hook for [OnModelDeleteExecute]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordDeleteExecute(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordAfterDeleteSuccess is a proxy Record model hook for [OnModelAfterDeleteSuccess]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordAfterDeleteError is a proxy Record model hook for [OnModelAfterDeleteError]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordAfterDeleteError(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionValidate is a proxy Collection model hook for [OnModelValidate]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionValidate(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionCreate is a proxy Collection model hook for [OnModelCreate]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionCreate(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionCreateExecute is a proxy Collection model hook for [OnModelCreateExecute]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionCreateExecute(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionAfterCreateSuccess is a proxy Collection model hook for [OnModelAfterCreateSuccess]. 
+ * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionAfterCreateSuccess(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionAfterCreateError is a proxy Collection model hook for [OnModelAfterCreateError]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionAfterCreateError(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionUpdate is a proxy Collection model hook for [OnModelUpdate]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionUpdate(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionUpdateExecute is a proxy Collection model hook for [OnModelUpdateExecute]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionUpdateExecute(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionAfterUpdateSuccess is a proxy Collection model hook for [OnModelAfterUpdateSuccess]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionAfterUpdateError is a proxy Collection model hook for [OnModelAfterUpdateError]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionAfterUpdateError(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionDelete is a proxy Collection model hook for [OnModelDelete]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionDelete(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionDeleteExecute is a proxy Collection model hook for [OnModelDeleteExecute]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionDeleteExecute(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionAfterDeleteSuccess is a proxy Collection model hook for [OnModelAfterDeleteSuccess]. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onCollectionAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionAfterDeleteError is a proxy Collection model hook for [OnModelAfterDeleteError]. 
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerSend hook is triggered every time when a new email is
+ * being sent using the App.NewMailClient() instance.
+ *
+ * It allows intercepting the email message or using a custom mailer client.
+ */
+ onMailerSend(): (hook.Hook)
+ /**
+ * OnMailerRecordAuthAlertSend hook is triggered when
+ * sending a new device login auth alert email, allowing you to
+ * intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordAuthAlertSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordPasswordResetSend hook is triggered when
+ * sending a password reset email to an auth record, allowing
+ * you to intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordPasswordResetSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordVerificationSend hook is triggered when
+ * sending a verification email to an auth record, allowing
+ * you to intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordVerificationSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordEmailChangeSend hook is triggered when sending a
+ * new address confirmation email to an auth record, allowing
+ * you to intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordEmailChangeSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordOTPSend hook is triggered when sending an OTP email
+ * to an auth record, allowing you to intercept and customize the
+ * email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordOTPSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRealtimeConnectRequest hook is triggered when establishing the SSE client connection.
+ *
+ * Any execution after [e.Next()] of a hook handler happens after the client disconnects.
+ */
+ onRealtimeConnectRequest(): (hook.Hook)
+ /**
+ * OnRealtimeMessageSend hook is triggered when sending an SSE message to a client.
+ */
+ onRealtimeMessageSend(): (hook.Hook)
+ /**
+ * OnRealtimeSubscribeRequest hook is triggered when updating the
+ * client subscriptions, allowing you to further validate and
+ * modify the submitted change.
+ */
+ onRealtimeSubscribeRequest(): (hook.Hook)
+ /**
+ * OnSettingsListRequest hook is triggered on each API Settings list request.
+ *
+ * Could be used to validate or modify the response before returning it to the client.
+ */
+ onSettingsListRequest(): (hook.Hook)
+ /**
+ * OnSettingsUpdateRequest hook is triggered on each API Settings update request.
+ *
+ * Could be used to additionally validate the request data or
+ * implement completely different persistence behavior.
+ */
+ onSettingsUpdateRequest(): (hook.Hook)
+ /**
+ * OnSettingsReload hook is triggered every time when the App.Settings()
+ * is being replaced with a new state.
+ *
+ * Calling App.Settings() after e.Next() should return the new state.
+ */
+ onSettingsReload(): (hook.Hook)
+ /**
+ * OnFileDownloadRequest hook is triggered before each API File download request.
+ *
+ * Could be used to validate or modify the file response before
+ * returning it to the client.
+ */
+ onFileDownloadRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnFileTokenRequest hook is triggered on each file token API request.
+ */
+ onFileTokenRequest(): (hook.Hook)
+ /**
+ * OnRecordAuthRequest hook is triggered on each successful API
+ * record authentication request (sign-in, token refresh, etc.).
+ *
+ * Could be used to additionally validate or modify the authenticated
+ * record data and token.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthWithPasswordRequest hook is triggered on each
+ * Record auth with password API request.
+ *
+ * RecordAuthWithPasswordRequestEvent.Record could be nil if no
+ * matching identity is found, allowing you to manually locate a different
+ * Record model (by reassigning [RecordAuthWithPasswordRequestEvent.Record]).
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthWithPasswordRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthWithOAuth2Request hook is triggered on each Record
+ * OAuth2 sign-in/sign-up API request (after token exchange and before external provider linking).
+ *
+ * If the [RecordAuthWithOAuth2RequestEvent.Record] is not set, then the OAuth2
+ * request will try to create a new auth Record.
+ *
+ * To assign or link a different existing record model you can
+ * change the [RecordAuthWithOAuth2RequestEvent.Record] field.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthWithOAuth2Request(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthRefreshRequest hook is triggered on each Record
+ * auth refresh API request (right before generating a new auth token).
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different auth refresh behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
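+ *
+ * A minimal binding sketch (the "users" tag and the event type name are illustrative assumptions):
+ *
+ * ```
+ * app.OnRecordAuthRefreshRequest("users").BindFunc(func(e *core.RecordAuthRefreshRequestEvent) error {
+ *     // place for extra validations before the new token is generated
+ *     return e.Next()
+ * })
+ * ```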
+ */ + onRecordAuthRefreshRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordRequestPasswordResetRequest hook is triggered on + * each Record request password reset API request. + * + * Could be used to additionally validate the request data or implement + * completely different password reset behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordRequestPasswordResetRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordConfirmPasswordResetRequest hook is triggered on + * each Record confirm password reset API request. + * + * Could be used to additionally validate the request data or implement + * completely different persistence behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordConfirmPasswordResetRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordRequestVerificationRequest hook is triggered on + * each Record request verification API request. + * + * Could be used to additionally validate the loaded request data or implement + * completely different verification behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordRequestVerificationRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordConfirmVerificationRequest hook is triggered on each + * Record confirm verification API request. + * + * Could be used to additionally validate the request data or implement + * completely different persistence behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordConfirmVerificationRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordRequestEmailChangeRequest hook is triggered on each + * Record request email change API request. + * + * Could be used to additionally validate the request data or implement + * completely different request email change behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordRequestEmailChangeRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordConfirmEmailChangeRequest hook is triggered on each + * Record confirm email change API request. + * + * Could be used to additionally validate the request data or implement + * completely different persistence behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordConfirmEmailChangeRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordRequestOTPRequest hook is triggered on each Record + * request OTP API request. 
+ * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordRequestOTPRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordAuthWithOTPRequest hook is triggered on each Record + * auth with OTP API request. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordAuthWithOTPRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordsListRequest hook is triggered on each API Records list request. + * + * Could be used to validate or modify the response before returning it to the client. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordsListRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordViewRequest hook is triggered on each API Record view request. + * + * Could be used to validate or modify the response before returning it to the client. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordViewRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordCreateRequest hook is triggered on each API Record create request. + * + * Could be used to additionally validate the request data or implement + * completely different persistence behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordCreateRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordUpdateRequest hook is triggered on each API Record update request. + * + * Could be used to additionally validate the request data or implement + * completely different persistence behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordUpdateRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnRecordDeleteRequest hook is triggered on each API Record delete request. + * + * Could be used to additionally validate the request data or implement + * completely different delete behavior. + * + * If the optional "tags" list (Collection ids or names) is specified, + * then all event handlers registered via the created hook will be + * triggered and called only if their event data origin matches the tags. + */ + onRecordDeleteRequest(...tags: string[]): (hook.TaggedHook) + /** + * OnCollectionsListRequest hook is triggered on each API Collections list request. + * + * Could be used to validate or modify the response before returning it to the client. + */ + onCollectionsListRequest(): (hook.Hook) + /** + * OnCollectionViewRequest hook is triggered on each API Collection view request. + * + * Could be used to validate or modify the response before returning it to the client. 
+ */
+ onCollectionViewRequest(): (hook.Hook)
+ /**
+ * OnCollectionCreateRequest hook is triggered on each API Collection create request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ */
+ onCollectionCreateRequest(): (hook.Hook)
+ /**
+ * OnCollectionUpdateRequest hook is triggered on each API Collection update request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ */
+ onCollectionUpdateRequest(): (hook.Hook)
+ /**
+ * OnCollectionDeleteRequest hook is triggered on each API Collection delete request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different delete behavior.
+ */
+ onCollectionDeleteRequest(): (hook.Hook)
+ /**
+ * OnCollectionsImportRequest hook is triggered on each API
+ * collections import request.
+ *
+ * Could be used to additionally validate the imported collections or
+ * to implement completely different import behavior.
+ */
+ onCollectionsImportRequest(): (hook.Hook)
+ /**
+ * OnBatchRequest hook is triggered on each API batch request.
+ *
+ * Could be used to additionally validate or modify the submitted batch requests.
+ */
+ onBatchRequest(): (hook.Hook)
+ }
+ // @ts-ignore
+ import validation = ozzo_validation
+ /**
+ * DBConnectFunc defines a database connection initialization function.
+ */
+ interface DBConnectFunc {(dbPath: string): (dbx.DB) }
+ /**
+ * RequestEvent defines the PocketBase router handler event.
+ */
+ type _subUBxxF = router.Event
+ interface RequestEvent extends _subUBxxF {
+ app: App
+ auth?: Record
+ }
+ interface RequestEvent {
+ /**
+ * RealIP returns the "real" IP address from the configured trusted proxy headers.
+ *
+ * If Settings.TrustedProxy is not configured or the found IP is empty,
+ * it falls back to e.RemoteIP().
+ *
+ * NB!
+ * Be careful when used in a security critical context as it relies on
+ * the trusted proxy to be properly configured and your app to be accessible only through it.
+ * If you are not sure, use e.RemoteIP().
+ */
+ realIP(): string
+ }
+ interface RequestEvent {
+ /**
+ * HasSuperuserAuth checks whether the current RequestEvent has superuser authentication loaded.
+ */
+ hasSuperuserAuth(): boolean
+ }
+ interface RequestEvent {
+ /**
+ * RequestInfo parses the current request into a RequestInfo instance.
+ *
+ * Note that the returned result is cached to avoid copying the request data multiple times
+ * but the auth state and other common store items are always refreshed in case they were changed by another handler.
+ */
+ requestInfo(): (RequestInfo)
+ }
+ interface InternalRequest {
+ /**
+ * note: for uploading files the value must be either *filesystem.File or []*filesystem.File
+ */
+ body: _TygojaDict
+ headers: _TygojaDict
+ method: string
+ url: string
+ }
+ interface InternalRequest {
+ validate(): void
+ }
+ type _subUYlom = BaseModel
+ interface Record extends _subUYlom {
+ }
+ interface Record {
+ /**
+ * Collection returns the Collection model associated with the current Record model.
+ *
+ * NB! The returned collection is only for read purposes and it shouldn't be modified
+ * because it could have unintended side-effects on other Record models from the same collection.
+ */
+ collection(): (Collection)
+ }
+ interface Record {
+ /**
+ * TableName returns the table name associated with the current Record model.
+ */
+ tableName(): string
+ }
+ interface Record {
+ /**
+ * PostScan implements the [dbx.PostScanner] interface.
+ *
+ * It essentially refreshes/updates the current Record original state
+ * as if the model was fetched from the database for the first time.
+ *
+ * Or in other words, it means that m.Original().FieldsData() will have
+ * the same values as m.Record().FieldsData().
+ */
+ postScan(): void
+ }
+ interface Record {
+ /**
+ * HookTags returns the hook tags associated with the current record.
+ */
+ hookTags(): Array
+ }
+ interface Record {
+ /**
+ * BaseFilesPath returns the storage dir path used by the record.
+ */
+ baseFilesPath(): string
+ }
+ interface Record {
+ /**
+ * Original returns a shallow copy of the current record model populated
+ * with its ORIGINAL db data state (aka. right after PostScan())
+ * and everything else reset to the defaults.
+ *
+ * If the record was created using NewRecord() the original will always be
+ * a blank record (until PostScan() is invoked).
+ */
+ original(): (Record)
+ }
+ interface Record {
+ /**
+ * Fresh returns a shallow copy of the current record model populated
+ * with its LATEST data state and everything else reset to the defaults
+ * (aka. no expand, no unknown fields and with default visibility flags).
+ */
+ fresh(): (Record)
+ }
+ interface Record {
+ /**
+ * Clone returns a shallow copy of the current record model with all of
+ * its collection and unknown fields data, expand and flags copied.
+ *
+ * Use [Record.Fresh()] instead if you want a copy with only the latest
+ * collection fields data and everything else reset to the defaults.
+ */
+ clone(): (Record)
+ }
+ interface Record {
+ /**
+ * Expand returns a shallow copy of the current Record model expand data (if any).
+ */
+ expand(): _TygojaDict
+ }
+ interface Record {
+ /**
+ * SetExpand replaces the current Record's expand with the provided expand arg data (shallow copied).
+ */
+ setExpand(expand: _TygojaDict): void
+ }
+ interface Record {
+ /**
+ * MergeExpand merges recursively the provided expand data into
+ * the current model's expand (if any).
+ *
+ * Note that if an expanded prop with the same key is a slice (old or new expand)
+ * then both old and new records will be merged into a new slice (aka. a :merge: [b,c] => [a,b,c]).
+ * Otherwise the "old" expanded record will be replaced with the "new" one (aka. a :merge: aNew => aNew).
+ */
+ mergeExpand(expand: _TygojaDict): void
+ }
+ interface Record {
+ /**
+ * FieldsData returns a shallow copy ONLY of the collection's fields record's data.
+ */
+ fieldsData(): _TygojaDict
+ }
+ interface Record {
+ /**
+ * CustomData returns a shallow copy ONLY of the custom record fields data,
+ * aka. fields that are neither defined by the collection, nor special system ones.
+ *
+ * Note that custom fields prefixed with "@pbInternal" are always skipped.
+ */
+ customData(): _TygojaDict
+ }
+ interface Record {
+ /**
+ * WithCustomData toggles the export/serialization of custom data fields
+ * (false by default).
+ */
+ withCustomData(state: boolean): (Record)
+ }
+ interface Record {
+ /**
+ * IgnoreEmailVisibility toggles the flag to ignore the auth record email visibility check.
+ */
+ ignoreEmailVisibility(state: boolean): (Record)
+ }
+ interface Record {
+ /**
+ * IgnoreUnchangedFields toggles the flag to ignore the unchanged fields
+ * from the DB export for the UPDATE SQL query.
+ *
+ * This could be used if you want to save only the record fields that you've changed
+ * without overwriting other untouched fields in case of a concurrent update.
+ */
+ ignoreUnchangedFields(state: boolean): (Record)
+ }
+ interface Record {
+ /**
+ * SetRaw sets the provided key-value data pair into the current Record
+ * model directly as it is WITHOUT NORMALIZATIONS.
+ *
+ * See also [Record.Set].
+ */
+ setRaw(key: string, value: any): void
+ }
+ interface Record {
+ /**
+ * SetIfFieldExists sets the provided key-value data pair into the current Record model
+ * ONLY if key is an existing Collection field name/modifier.
+ *
+ * This method does nothing if key is not a known Collection field name/modifier.
+ *
+ * On success returns the matched Field, otherwise - nil.
+ *
+ * To set any key-value, including custom/unknown fields, use the [Record.Set] method.
+ */
+ setIfFieldExists(key: string, value: any): Field
+ }
+ interface Record {
+ /**
+ * Set sets the provided key-value data pair into the current Record model.
+ *
+ * If the record collection has a field with a name matching the provided "key",
+ * the value will be further normalized according to the field setter(s).
+ */
+ set(key: string, value: any): void
+ }
+ interface Record {
+ getRaw(key: string): any
+ }
+ interface Record {
+ /**
+ * Get returns a normalized single record model data value for "key".
+ */
+ get(key: string): any
+ }
+ interface Record {
+ /**
+ * Load bulk loads the provided data into the current Record model.
+ */
+ load(data: _TygojaDict): void
+ }
+ interface Record {
+ /**
+ * GetBool returns the data value for "key" as a bool.
+ */
+ getBool(key: string): boolean
+ }
+ interface Record {
+ /**
+ * GetString returns the data value for "key" as a string.
+ */
+ getString(key: string): string
+ }
+ interface Record {
+ /**
+ * GetInt returns the data value for "key" as an int.
+ */
+ getInt(key: string): number
+ }
+ interface Record {
+ /**
+ * GetFloat returns the data value for "key" as a float64.
+ */
+ getFloat(key: string): number
+ }
+ interface Record {
+ /**
+ * GetDateTime returns the data value for "key" as a DateTime instance.
+ */
+ getDateTime(key: string): types.DateTime
+ }
+ interface Record {
+ /**
+ * GetStringSlice returns the data value for "key" as a slice of non-zero unique strings.
+ */
+ getStringSlice(key: string): Array
+ }
+ interface Record {
+ /**
+ * GetUploadedFiles returns the uploaded files for the provided "file" field key
+ * (aka. the current [*filesystem.File] values) so that you can apply further
+ * validations or modifications (including changing the file name or content before persisting).
+ *
+ * Example:
+ *
+ * ```
+ * files := record.GetUploadedFiles("documents")
+ * for _, f := range files {
+ *     f.Name = "doc_" + f.Name // add a prefix to each file name
+ * }
+ * app.Save(record) // the files are pointers so the applied changes will transparently reflect on the record value
+ * ```
+ */
+ getUploadedFiles(key: string): Array<(filesystem.File | undefined)>
+ }
+ interface Record {
+ /**
+ * UnmarshalJSONField retrieves the "key" json field value and unmarshals it into "result".
+ *
+ * Example:
+ *
+ * ```
+ * result := struct {
+ *     FirstName string `json:"first_name"`
+ * }{}
+ * err := m.UnmarshalJSONField("my_field_name", &result)
+ * ```
+ */
+ unmarshalJSONField(key: string, result: any): void
+ }
+ interface Record {
+ /**
+ * ExpandedOne retrieves a single relation Record from the already
+ * loaded expand data of the current model.
+ 	 *
+ 	 * If the requested expand relation is multiple, this method returns
+ 	 * only the first available Record from the expanded relation.
+ 	 *
+ 	 * Returns nil if there is no such expand relation loaded.
+ 	 */
+ 	expandedOne(relField: string): (Record)
+ }
+ interface Record {
+ 	/**
+ 	 * ExpandedAll retrieves a slice of relation Records from the already
+ 	 * loaded expand data of the current model.
+ 	 *
+ 	 * If the requested expand relation is single, this method normalizes
+ 	 * the return result and will wrap the single model as a slice.
+ 	 *
+ 	 * Returns a nil slice if there is no such expand relation loaded.
+ 	 */
+ 	expandedAll(relField: string): Array<(Record | undefined)>
+ }
+ interface Record {
+ 	/**
+ 	 * FindFileFieldByFile returns the first file type field for which
+ 	 * any of the record's data contains the provided filename.
+ 	 */
+ 	findFileFieldByFile(filename: string): (FileField)
+ }
+ interface Record {
+ 	/**
+ 	 * DBExport implements the [DBExporter] interface and returns a key-value
+ 	 * map with the data to be persisted when saving the Record in the database.
+ 	 */
+ 	dbExport(app: App): _TygojaDict
+ }
+ interface Record {
+ 	/**
+ 	 * Hide hides the specified fields from the public safe serialization of the record.
+ 	 */
+ 	hide(...fieldNames: string[]): (Record)
+ }
+ interface Record {
+ 	/**
+ 	 * Unhide forces the specified fields to be included in the public safe serialization
+ 	 * of the record (even when the collection field itself is marked as hidden).
+ 	 */
+ 	unhide(...fieldNames: string[]): (Record)
+ }
+ interface Record {
+ 	/**
+ 	 * PublicExport exports only the record fields that are safe to be public.
+ 	 *
+ 	 * To export unknown data fields you need to set record.WithCustomData(true).
+ 	 *
+ 	 * For auth records, to force the export of the email field you need to set
+ 	 * record.IgnoreEmailVisibility(true).
+ 	 */
+ 	publicExport(): _TygojaDict
+ }
+ interface Record {
+ 	/**
+ 	 * MarshalJSON implements the [json.Marshaler] interface.
+ 	 *
+ 	 * Only the data exported by `PublicExport()` will be serialized.
+ 	 */
+ 	marshalJSON(): string|Array
+ }
+ interface Record {
+ 	/**
+ 	 * UnmarshalJSON implements the [json.Unmarshaler] interface.
+ 	 */
+ 	unmarshalJSON(data: string|Array): void
+ }
+ interface Record {
+ 	/**
+ 	 * ReplaceModifiers returns a new map with applied modifier
+ 	 * values based on the current record and the specified data.
+ 	 *
+ 	 * The resolved modifier keys will be removed.
+ 	 *
+ 	 * Multiple modifiers will be applied one after another,
+ 	 * while reusing the previous base key value result (ex. 1; -5; +2 => -2).
+ 	 *
+ 	 * Note that because Go doesn't guarantee the iteration order of maps,
+ 	 * shorter keys are explicitly applied first for a more consistent and reproducible behavior.
+ 	 *
+ 	 * Example usage:
+ 	 *
+ 	 * ```
+ 	 * newData := record.ReplaceModifiers(data)
+ 	 * // record: {"field": 10}
+ 	 * // data: {"field+": 5}
+ 	 * // result: {"field": 15}
+ 	 * ```
+ 	 */
+ 	replaceModifiers(data: _TygojaDict): _TygojaDict
+ }
+ interface Record {
+ 	/**
+ 	 * Email returns the "email" record field value (usually available with Auth collections).
+ 	 */
+ 	email(): string
+ }
+ interface Record {
+ 	/**
+ 	 * SetEmail sets the "email" record field value (usually available with Auth collections).
+ 	 */
+ 	setEmail(email: string): void
+ }
+ interface Record {
+ 	/**
+ 	 * EmailVisibility returns the "emailVisibility" record field value (usually available with Auth collections).
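+ 	 *
+ 	 * For example (a sketch):
+ 	 *
+ 	 * ```
+ 	 * if !record.EmailVisibility() {
+ 	 *     // the email will be omitted from PublicExport() unless record.IgnoreEmailVisibility(true) is set
+ 	 * }
+ 	 * ```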
+ */ + emailVisibility(): boolean + } + interface Record { + /** + * SetEmailVisibility sets the "emailVisibility" record field value (usually available with Auth collections). + */ + setEmailVisibility(visible: boolean): void + } + interface Record { + /** + * Verified returns the "verified" record field value (usually available with Auth collections). + */ + verified(): boolean + } + interface Record { + /** + * SetVerified sets the "verified" record field value (usually available with Auth collections). + */ + setVerified(verified: boolean): void + } + interface Record { + /** + * TokenKey returns the "tokenKey" record field value (usually available with Auth collections). + */ + tokenKey(): string + } + interface Record { + /** + * SetTokenKey sets the "tokenKey" record field value (usually available with Auth collections). + */ + setTokenKey(key: string): void + } + interface Record { + /** + * RefreshTokenKey generates and sets a new random auth record "tokenKey". + */ + refreshTokenKey(): void + } + interface Record { + /** + * SetPassword sets the "password" record field value (usually available with Auth collections). + */ + setPassword(password: string): void + } + interface Record { + /** + * ValidatePassword validates a plain password against the "password" record field. + * + * Returns false if the password is incorrect. + */ + validatePassword(password: string): boolean + } + interface Record { + /** + * IsSuperuser returns whether the current record is a superuser, aka. + * whether the record is from the _superusers collection. + */ + isSuperuser(): boolean + } + interface Record { + /** + * NewStaticAuthToken generates and returns a new static record authentication token. + * + * Static auth tokens are similar to the regular auth tokens, but are + * non-refreshable and support custom duration. + * + * Zero or negative duration will fallback to the duration from the auth collection settings. + */ + newStaticAuthToken(duration: time.Duration): string + } + interface Record { + /** + * NewAuthToken generates and returns a new record authentication token. + */ + newAuthToken(): string + } + interface Record { + /** + * NewVerificationToken generates and returns a new record verification token. + */ + newVerificationToken(): string + } + interface Record { + /** + * NewPasswordResetToken generates and returns a new auth record password reset request token. + */ + newPasswordResetToken(): string + } + interface Record { + /** + * NewEmailChangeToken generates and returns a new auth record change email request token. + */ + newEmailChangeToken(newEmail: string): string + } + interface Record { + /** + * NewFileToken generates and returns a new record private file access token. + */ + newFileToken(): string + } +} + /** * Package sync provides basic synchronization primitives such as mutual * exclusion locks. Other than the [Once] and [WaitGroup] types, most are intended @@ -13723,6 +13715,25 @@ namespace sync { } } +/** + * Package io provides basic interfaces to I/O primitives. + * Its primary job is to wrap existing implementations of such primitives, + * such as those in package os, into shared public interfaces that + * abstract the functionality, plus some other related primitives. + * + * Because these interfaces and primitives wrap lower-level operations with + * various implementations, unless otherwise informed clients should not + * assume they are safe for parallel execution. 
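+ * For example (a minimal sketch; the file name is illustrative), an *os.File can be used
+ * through the WriteCloser interface declared below:
+ *
+ * ```
+ * f, err := os.Create("output.txt")
+ * if err != nil {
+ *     log.Fatal(err)
+ * }
+ * var wc io.WriteCloser = f
+ * if _, err := wc.Write([]byte("hello")); err != nil {
+ *     log.Fatal(err)
+ * }
+ * if err := wc.Close(); err != nil {
+ *     log.Fatal(err)
+ * }
+ * ```
+ *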
+ */ +namespace io { + /** + * WriteCloser is the interface that groups the basic Write and Close methods. + */ + interface WriteCloser { + [key:string]: any; + } +} + /** * Package syscall contains an interface to the low-level operating system * primitives. The details vary depending on the underlying system, and @@ -13975,25 +13986,6 @@ namespace time { namespace context { } -/** - * Package io provides basic interfaces to I/O primitives. - * Its primary job is to wrap existing implementations of such primitives, - * such as those in package os, into shared public interfaces that - * abstract the functionality, plus some other related primitives. - * - * Because these interfaces and primitives wrap lower-level operations with - * various implementations, unless otherwise informed clients should not - * assume they are safe for parallel execution. - */ -namespace io { - /** - * WriteCloser is the interface that groups the basic Write and Close methods. - */ - interface WriteCloser { - [key:string]: any; - } -} - /** * Package fs defines basic interfaces to a file system. * A file system can be provided by the host operating system @@ -14005,467 +13997,6 @@ namespace io { namespace fs { } -/** - * Package url parses URLs and implements query escaping. - */ -namespace url { - /** - * A URL represents a parsed URL (technically, a URI reference). - * - * The general form represented is: - * - * ``` - * [scheme:][//[userinfo@]host][/]path[?query][#fragment] - * ``` - * - * URLs that do not start with a slash after the scheme are interpreted as: - * - * ``` - * scheme:opaque[?query][#fragment] - * ``` - * - * The Host field contains the host and port subcomponents of the URL. - * When the port is present, it is separated from the host with a colon. - * When the host is an IPv6 address, it must be enclosed in square brackets: - * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port - * into a string suitable for the Host field, adding square brackets to - * the host when necessary. - * - * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. - * A consequence is that it is impossible to tell which slashes in the Path were - * slashes in the raw URL and which were %2f. This distinction is rarely important, - * but when it is, the code should use the [URL.EscapedPath] method, which preserves - * the original encoding of Path. - * - * The RawPath field is an optional field which is only set when the default - * encoding of Path is different from the escaped path. See the EscapedPath method - * for more details. - * - * URL's String method uses the EscapedPath method to obtain the path. - */ - interface URL { - scheme: string - opaque: string // encoded opaque data - user?: Userinfo // username and password information - host: string // host or host:port (see Hostname and Port methods) - path: string // path (relative paths may omit leading slash) - rawPath: string // encoded path hint (see EscapedPath method) - omitHost: boolean // do not emit empty host (authority) - forceQuery: boolean // append a query ('?') even if RawQuery is empty - rawQuery: string // encoded query values, without '?' - fragment: string // fragment for references, without '#' - rawFragment: string // encoded fragment hint (see EscapedFragment method) - } - interface URL { - /** - * EscapedPath returns the escaped form of u.Path. - * In general there are multiple possible escaped forms of any path. - * EscapedPath returns u.RawPath when it is a valid escaping of u.Path. 
- * Otherwise EscapedPath ignores u.RawPath and computes an escaped - * form on its own. - * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct - * their results. - * In general, code should call EscapedPath instead of - * reading u.RawPath directly. - */ - escapedPath(): string - } - interface URL { - /** - * EscapedFragment returns the escaped form of u.Fragment. - * In general there are multiple possible escaped forms of any fragment. - * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. - * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped - * form on its own. - * The [URL.String] method uses EscapedFragment to construct its result. - * In general, code should call EscapedFragment instead of - * reading u.RawFragment directly. - */ - escapedFragment(): string - } - interface URL { - /** - * String reassembles the [URL] into a valid URL string. - * The general form of the result is one of: - * - * ``` - * scheme:opaque?query#fragment - * scheme://userinfo@host/path?query#fragment - * ``` - * - * If u.Opaque is non-empty, String uses the first form; - * otherwise it uses the second form. - * Any non-ASCII characters in host are escaped. - * To obtain the path, String uses u.EscapedPath(). - * - * In the second form, the following rules apply: - * ``` - * - if u.Scheme is empty, scheme: is omitted. - * - if u.User is nil, userinfo@ is omitted. - * - if u.Host is empty, host/ is omitted. - * - if u.Scheme and u.Host are empty and u.User is nil, - * the entire scheme://userinfo@host/ is omitted. - * - if u.Host is non-empty and u.Path begins with a /, - * the form host/path does not add its own /. - * - if u.RawQuery is empty, ?query is omitted. - * - if u.Fragment is empty, #fragment is omitted. - * ``` - */ - string(): string - } - interface URL { - /** - * Redacted is like [URL.String] but replaces any password with "xxxxx". - * Only the password in u.User is redacted. - */ - redacted(): string - } - /** - * Values maps a string key to a list of values. - * It is typically used for query parameters and form values. - * Unlike in the http.Header map, the keys in a Values map - * are case-sensitive. - */ - interface Values extends _TygojaDict{} - interface Values { - /** - * Get gets the first value associated with the given key. - * If there are no values associated with the key, Get returns - * the empty string. To access multiple values, use the map - * directly. - */ - get(key: string): string - } - interface Values { - /** - * Set sets the key to value. It replaces any existing - * values. - */ - set(key: string, value: string): void - } - interface Values { - /** - * Add adds the value to key. It appends to any existing - * values associated with key. - */ - add(key: string, value: string): void - } - interface Values { - /** - * Del deletes the values associated with key. - */ - del(key: string): void - } - interface Values { - /** - * Has checks whether a given key is set. - */ - has(key: string): boolean - } - interface Values { - /** - * Encode encodes the values into “URL encoded” form - * ("bar=baz&foo=quux") sorted by key. - */ - encode(): string - } - interface URL { - /** - * IsAbs reports whether the [URL] is absolute. - * Absolute means that it has a non-empty scheme. - */ - isAbs(): boolean - } - interface URL { - /** - * Parse parses a [URL] in the context of the receiver. The provided URL - * may be relative or absolute. 
Parse returns nil, err on parse - * failure, otherwise its return value is the same as [URL.ResolveReference]. - */ - parse(ref: string): (URL) - } - interface URL { - /** - * ResolveReference resolves a URI reference to an absolute URI from - * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference - * may be relative or absolute. ResolveReference always returns a new - * [URL] instance, even if the returned URL is identical to either the - * base or reference. If ref is an absolute URL, then ResolveReference - * ignores base and returns a copy of ref. - */ - resolveReference(ref: URL): (URL) - } - interface URL { - /** - * Query parses RawQuery and returns the corresponding values. - * It silently discards malformed value pairs. - * To check errors use [ParseQuery]. - */ - query(): Values - } - interface URL { - /** - * RequestURI returns the encoded path?query or opaque?query - * string that would be used in an HTTP request for u. - */ - requestURI(): string - } - interface URL { - /** - * Hostname returns u.Host, stripping any valid port number if present. - * - * If the result is enclosed in square brackets, as literal IPv6 addresses are, - * the square brackets are removed from the result. - */ - hostname(): string - } - interface URL { - /** - * Port returns the port part of u.Host, without the leading colon. - * - * If u.Host doesn't contain a valid numeric port, Port returns an empty string. - */ - port(): string - } - interface URL { - marshalBinary(): string|Array - } - interface URL { - unmarshalBinary(text: string|Array): void - } - interface URL { - /** - * JoinPath returns a new [URL] with the provided path elements joined to - * any existing path and the resulting path cleaned of any ./ or ../ elements. - * Any sequences of multiple / characters will be reduced to a single /. - */ - joinPath(...elem: string[]): (URL) - } -} - -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { -} - -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { - /** - * IsolationLevel is the transaction isolation level used in [TxOptions]. - */ - interface IsolationLevel extends Number{} - interface IsolationLevel { - /** - * String returns the name of the transaction isolation level. - */ - string(): string - } - /** - * DBStats contains database statistics. - */ - interface DBStats { - maxOpenConnections: number // Maximum number of open connections to the database. - /** - * Pool Status - */ - openConnections: number // The number of established connections both in use and idle. - inUse: number // The number of connections currently in use. - idle: number // The number of idle connections. - /** - * Counters - */ - waitCount: number // The total number of connections waited for. - waitDuration: time.Duration // The total time blocked waiting for a new connection. - maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. - maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. 
- maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. - } - /** - * Conn represents a single database connection rather than a pool of database - * connections. Prefer running queries from [DB] unless there is a specific - * need for a continuous single database connection. - * - * A Conn must call [Conn.Close] to return the connection to the database pool - * and may do so concurrently with a running query. - * - * After a call to [Conn.Close], all operations on the - * connection fail with [ErrConnDone]. - */ - interface Conn { - } - interface Conn { - /** - * PingContext verifies the connection to the database is still alive. - */ - pingContext(ctx: context.Context): void - } - interface Conn { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Conn { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Conn { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * the [*Row.Scan] method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Conn { - /** - * PrepareContext creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface Conn { - /** - * Raw executes f exposing the underlying driver connection for the - * duration of f. The driverConn must not be used outside of f. - * - * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable - * until [Conn.Close] is called. - */ - raw(f: (driverConn: any) => void): void - } - interface Conn { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. - */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) - } - interface Conn { - /** - * Close returns the connection to the connection pool. - * All operations after a Close will return with [ErrConnDone]. - * Close is safe to call concurrently with other operations and will - * block until all other operations finish. It may be useful to first - * cancel any used context and then call close directly after. - */ - close(): void - } - /** - * ColumnType contains the name and type of a column. 
- */ - interface ColumnType { - } - interface ColumnType { - /** - * Name returns the name or alias of the column. - */ - name(): string - } - interface ColumnType { - /** - * Length returns the column type length for variable length column types such - * as text and binary field types. If the type length is unbounded the value will - * be [math.MaxInt64] (any database limits will still apply). - * If the column type is not variable length, such as an int, or if not supported - * by the driver ok is false. - */ - length(): [number, boolean] - } - interface ColumnType { - /** - * DecimalSize returns the scale and precision of a decimal type. - * If not applicable or if not supported ok is false. - */ - decimalSize(): [number, boolean] - } - interface ColumnType { - /** - * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. - * If a driver does not support this property ScanType will return - * the type of an empty interface. - */ - scanType(): any - } - interface ColumnType { - /** - * Nullable reports whether the column may be null. - * If a driver does not support this property ok will be false. - */ - nullable(): boolean - } - interface ColumnType { - /** - * DatabaseTypeName returns the database system name of the column type. If an empty - * string is returned, then the driver type name is not supported. - * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers - * are not included. - * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", - * "INT", and "BIGINT". - */ - databaseTypeName(): string - } - /** - * Row is the result of calling [DB.QueryRow] to select a single row. - */ - interface Row { - } - interface Row { - /** - * Scan copies the columns from the matched row into the values - * pointed at by dest. See the documentation on [Rows.Scan] for details. - * If more than one row matches the query, - * Scan uses the first row and discards the rest. If no row matches - * the query, Scan returns [ErrNoRows]. - */ - scan(...dest: any[]): void - } - interface Row { - /** - * Err provides a way for wrapping packages to check for - * query errors without calling [Row.Scan]. - * Err returns the error, if any, that was encountered while running the query. - * If this error is not nil, this error will also be returned from [Row.Scan]. - */ - err(): void - } -} - /** * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer * object, creating another object (Reader or Writer) that also implements @@ -14839,6 +14370,442 @@ namespace net { } } +/** + * Package url parses URLs and implements query escaping. + */ +namespace url { + /** + * A URL represents a parsed URL (technically, a URI reference). + * + * The general form represented is: + * + * ``` + * [scheme:][//[userinfo@]host][/]path[?query][#fragment] + * ``` + * + * URLs that do not start with a slash after the scheme are interpreted as: + * + * ``` + * scheme:opaque[?query][#fragment] + * ``` + * + * The Host field contains the host and port subcomponents of the URL. + * When the port is present, it is separated from the host with a colon. + * When the host is an IPv6 address, it must be enclosed in square brackets: + * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port + * into a string suitable for the Host field, adding square brackets to + * the host when necessary. + * + * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. 
+ * A consequence is that it is impossible to tell which slashes in the Path were + * slashes in the raw URL and which were %2f. This distinction is rarely important, + * but when it is, the code should use the [URL.EscapedPath] method, which preserves + * the original encoding of Path. + * + * The RawPath field is an optional field which is only set when the default + * encoding of Path is different from the escaped path. See the EscapedPath method + * for more details. + * + * URL's String method uses the EscapedPath method to obtain the path. + */ + interface URL { + scheme: string + opaque: string // encoded opaque data + user?: Userinfo // username and password information + host: string // host or host:port (see Hostname and Port methods) + path: string // path (relative paths may omit leading slash) + rawPath: string // encoded path hint (see EscapedPath method) + omitHost: boolean // do not emit empty host (authority) + forceQuery: boolean // append a query ('?') even if RawQuery is empty + rawQuery: string // encoded query values, without '?' + fragment: string // fragment for references, without '#' + rawFragment: string // encoded fragment hint (see EscapedFragment method) + } + interface URL { + /** + * EscapedPath returns the escaped form of u.Path. + * In general there are multiple possible escaped forms of any path. + * EscapedPath returns u.RawPath when it is a valid escaping of u.Path. + * Otherwise EscapedPath ignores u.RawPath and computes an escaped + * form on its own. + * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct + * their results. + * In general, code should call EscapedPath instead of + * reading u.RawPath directly. + */ + escapedPath(): string + } + interface URL { + /** + * EscapedFragment returns the escaped form of u.Fragment. + * In general there are multiple possible escaped forms of any fragment. + * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. + * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped + * form on its own. + * The [URL.String] method uses EscapedFragment to construct its result. + * In general, code should call EscapedFragment instead of + * reading u.RawFragment directly. + */ + escapedFragment(): string + } + interface URL { + /** + * String reassembles the [URL] into a valid URL string. + * The general form of the result is one of: + * + * ``` + * scheme:opaque?query#fragment + * scheme://userinfo@host/path?query#fragment + * ``` + * + * If u.Opaque is non-empty, String uses the first form; + * otherwise it uses the second form. + * Any non-ASCII characters in host are escaped. + * To obtain the path, String uses u.EscapedPath(). + * + * In the second form, the following rules apply: + * ``` + * - if u.Scheme is empty, scheme: is omitted. + * - if u.User is nil, userinfo@ is omitted. + * - if u.Host is empty, host/ is omitted. + * - if u.Scheme and u.Host are empty and u.User is nil, + * the entire scheme://userinfo@host/ is omitted. + * - if u.Host is non-empty and u.Path begins with a /, + * the form host/path does not add its own /. + * - if u.RawQuery is empty, ?query is omitted. + * - if u.Fragment is empty, #fragment is omitted. + * ``` + */ + string(): string + } + interface URL { + /** + * Redacted is like [URL.String] but replaces any password with "xxxxx". + * Only the password in u.User is redacted. + */ + redacted(): string + } + /** + * Values maps a string key to a list of values. 
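+ 	 *
+ 	 * For example (a sketch; the keys are illustrative):
+ 	 *
+ 	 * ```
+ 	 * v := url.Values{}
+ 	 * v.Set("q", "golang")
+ 	 * v.Add("tag", "a")
+ 	 * v.Add("tag", "b")
+ 	 * fmt.Println(v.Encode()) // "q=golang&tag=a&tag=b"
+ 	 * ```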
+ * It is typically used for query parameters and form values. + * Unlike in the http.Header map, the keys in a Values map + * are case-sensitive. + */ + interface Values extends _TygojaDict{} + interface Values { + /** + * Get gets the first value associated with the given key. + * If there are no values associated with the key, Get returns + * the empty string. To access multiple values, use the map + * directly. + */ + get(key: string): string + } + interface Values { + /** + * Set sets the key to value. It replaces any existing + * values. + */ + set(key: string, value: string): void + } + interface Values { + /** + * Add adds the value to key. It appends to any existing + * values associated with key. + */ + add(key: string, value: string): void + } + interface Values { + /** + * Del deletes the values associated with key. + */ + del(key: string): void + } + interface Values { + /** + * Has checks whether a given key is set. + */ + has(key: string): boolean + } + interface Values { + /** + * Encode encodes the values into “URL encoded” form + * ("bar=baz&foo=quux") sorted by key. + */ + encode(): string + } + interface URL { + /** + * IsAbs reports whether the [URL] is absolute. + * Absolute means that it has a non-empty scheme. + */ + isAbs(): boolean + } + interface URL { + /** + * Parse parses a [URL] in the context of the receiver. The provided URL + * may be relative or absolute. Parse returns nil, err on parse + * failure, otherwise its return value is the same as [URL.ResolveReference]. + */ + parse(ref: string): (URL) + } + interface URL { + /** + * ResolveReference resolves a URI reference to an absolute URI from + * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference + * may be relative or absolute. ResolveReference always returns a new + * [URL] instance, even if the returned URL is identical to either the + * base or reference. If ref is an absolute URL, then ResolveReference + * ignores base and returns a copy of ref. + */ + resolveReference(ref: URL): (URL) + } + interface URL { + /** + * Query parses RawQuery and returns the corresponding values. + * It silently discards malformed value pairs. + * To check errors use [ParseQuery]. + */ + query(): Values + } + interface URL { + /** + * RequestURI returns the encoded path?query or opaque?query + * string that would be used in an HTTP request for u. + */ + requestURI(): string + } + interface URL { + /** + * Hostname returns u.Host, stripping any valid port number if present. + * + * If the result is enclosed in square brackets, as literal IPv6 addresses are, + * the square brackets are removed from the result. + */ + hostname(): string + } + interface URL { + /** + * Port returns the port part of u.Host, without the leading colon. + * + * If u.Host doesn't contain a valid numeric port, Port returns an empty string. + */ + port(): string + } + interface URL { + marshalBinary(): string|Array + } + interface URL { + unmarshalBinary(text: string|Array): void + } + interface URL { + /** + * JoinPath returns a new [URL] with the provided path elements joined to + * any existing path and the resulting path cleaned of any ./ or ../ elements. + * Any sequences of multiple / characters will be reduced to a single /. + */ + joinPath(...elem: string[]): (URL) + } +} + +/** + * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. + * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. 
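+ *
+ * For example (a minimal sketch; the command and group names are hypothetical), commands can be organized with the Group type declared below:
+ *
+ * ```
+ * root := &cobra.Command{Use: "app"}
+ * root.AddGroup(&cobra.Group{ID: "management", Title: "Management Commands:"})
+ * root.AddCommand(&cobra.Command{
+ *     Use:     "serve",
+ *     GroupID: "management",
+ *     Run:     func(cmd *cobra.Command, args []string) {},
+ * })
+ * ```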
+ */ +namespace cobra { + interface PositionalArgs {(cmd: Command, args: Array): void } + // @ts-ignore + import flag = pflag + /** + * FParseErrWhitelist configures Flag parse errors to be ignored + */ + interface FParseErrWhitelist extends _TygojaAny{} + /** + * Group Structure to manage groups for commands + */ + interface Group { + id: string + title: string + } + /** + * ShellCompDirective is a bit map representing the different behaviors the shell + * can be instructed to have once completions have been provided. + */ + interface ShellCompDirective extends Number{} + /** + * CompletionOptions are the options to control shell completion + */ + interface CompletionOptions { + /** + * DisableDefaultCmd prevents Cobra from creating a default 'completion' command + */ + disableDefaultCmd: boolean + /** + * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + * for shells that support completion descriptions + */ + disableNoDescFlag: boolean + /** + * DisableDescriptions turns off all completion descriptions for shells + * that support them + */ + disableDescriptions: boolean + /** + * HiddenDefaultCmd makes the default 'completion' command hidden + */ + hiddenDefaultCmd: boolean + } +} + +namespace store { + /** + * Store defines a concurrent safe in memory key-value data store. + */ + interface Store { + } + interface Store { + /** + * Reset clears the store and replaces the store data with a + * shallow copy of the provided newData. + */ + reset(newData: _TygojaDict): void + } + interface Store { + /** + * Length returns the current number of elements in the store. + */ + length(): number + } + interface Store { + /** + * RemoveAll removes all the existing store entries. + */ + removeAll(): void + } + interface Store { + /** + * Remove removes a single entry from the store. + * + * Remove does nothing if key doesn't exist in the store. + */ + remove(key: string): void + } + interface Store { + /** + * Has checks if element with the specified key exist or not. + */ + has(key: string): boolean + } + interface Store { + /** + * Get returns a single element value from the store. + * + * If key is not set, the zero T value is returned. + */ + get(key: string): T + } + interface Store { + /** + * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not. + */ + getOk(key: string): [T, boolean] + } + interface Store { + /** + * GetAll returns a shallow copy of the current store data. + */ + getAll(): _TygojaDict + } + interface Store { + /** + * Values returns a slice with all of the current store values. + */ + values(): Array + } + interface Store { + /** + * Set sets (or overwrite if already exist) a new value for key. + */ + set(key: string, value: T): void + } + interface Store { + /** + * GetOrSet retrieves a single existing value for the provided key + * or stores a new one if it doesn't exist. + */ + getOrSet(key: string, setFunc: () => T): T + } + interface Store { + /** + * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. + * + * This method is similar to Set() but **it will skip adding new elements** + * to the store if the store length has reached the specified limit. + * false is returned if maxAllowedElements limit is reached. + */ + setIfLessThanLimit(key: string, value: T, maxAllowedElements: number): boolean + } + interface Store { + /** + * UnmarshalJSON implements [json.Unmarshaler] and imports the + * provided JSON data into the store. 
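+ 	 *
+ 	 * A minimal sketch (assuming an existing store pointer s and illustrative JSON keys):
+ 	 *
+ 	 * ```
+ 	 * err := s.UnmarshalJSON([]byte(`{"total": 1}`))
+ 	 * ```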
+ * + * The store entries that match with the ones from the data will be overwritten with the new value. + */ + unmarshalJSON(data: string|Array): void + } + interface Store { + /** + * MarshalJSON implements [json.Marshaler] and export the current + * store data into valid JSON. + */ + marshalJSON(): string|Array + } +} + +namespace subscriptions { + /** + * Broker defines a struct for managing subscriptions clients. + */ + interface Broker { + } + interface Broker { + /** + * Clients returns a shallow copy of all registered clients indexed + * with their connection id. + */ + clients(): _TygojaDict + } + interface Broker { + /** + * ChunkedClients splits the current clients into a chunked slice. + */ + chunkedClients(chunkSize: number): Array> + } + interface Broker { + /** + * ClientById finds a registered client by its id. + * + * Returns non-nil error when client with clientId is not registered. + */ + clientById(clientId: string): Client + } + interface Broker { + /** + * Register adds a new client to the broker instance. + */ + register(client: Client): void + } + interface Broker { + /** + * Unregister removes a single client by its id. + * + * If client with clientId doesn't exist, this method does nothing. + */ + unregister(clientId: string): void + } +} + /** * Package textproto implements generic support for text-based request/response * protocols in the style of HTTP, NNTP, and SMTP. @@ -15000,709 +14967,6 @@ namespace multipart { } } -/** - * Package http provides HTTP client and server implementations. - * - * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: - * - * ``` - * resp, err := http.Get("http://example.com/") - * ... - * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) - * ... - * resp, err := http.PostForm("http://example.com/form", - * url.Values{"key": {"Value"}, "id": {"123"}}) - * ``` - * - * The caller must close the response body when finished with it: - * - * ``` - * resp, err := http.Get("http://example.com/") - * if err != nil { - * // handle error - * } - * defer resp.Body.Close() - * body, err := io.ReadAll(resp.Body) - * // ... - * ``` - * - * # Clients and Transports - * - * For control over HTTP client headers, redirect policy, and other - * settings, create a [Client]: - * - * ``` - * client := &http.Client{ - * CheckRedirect: redirectPolicyFunc, - * } - * - * resp, err := client.Get("http://example.com") - * // ... - * - * req, err := http.NewRequest("GET", "http://example.com", nil) - * // ... - * req.Header.Add("If-None-Match", `W/"wyzzy"`) - * resp, err := client.Do(req) - * // ... - * ``` - * - * For control over proxies, TLS configuration, keep-alives, - * compression, and other settings, create a [Transport]: - * - * ``` - * tr := &http.Transport{ - * MaxIdleConns: 10, - * IdleConnTimeout: 30 * time.Second, - * DisableCompression: true, - * } - * client := &http.Client{Transport: tr} - * resp, err := client.Get("https://example.com") - * ``` - * - * Clients and Transports are safe for concurrent use by multiple - * goroutines and for efficiency should only be created once and re-used. - * - * # Servers - * - * ListenAndServe starts an HTTP server with a given address and handler. - * The handler is usually nil, which means to use [DefaultServeMux]. 
- * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: - * - * ``` - * http.Handle("/foo", fooHandler) - * - * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { - * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) - * }) - * - * log.Fatal(http.ListenAndServe(":8080", nil)) - * ``` - * - * More control over the server's behavior is available by creating a - * custom Server: - * - * ``` - * s := &http.Server{ - * Addr: ":8080", - * Handler: myHandler, - * ReadTimeout: 10 * time.Second, - * WriteTimeout: 10 * time.Second, - * MaxHeaderBytes: 1 << 20, - * } - * log.Fatal(s.ListenAndServe()) - * ``` - * - * # HTTP/2 - * - * Starting with Go 1.6, the http package has transparent support for the - * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 - * can do so by setting [Transport.TLSNextProto] (for clients) or - * [Server.TLSNextProto] (for servers) to a non-nil, empty - * map. Alternatively, the following GODEBUG settings are - * currently supported: - * - * ``` - * GODEBUG=http2client=0 # disable HTTP/2 client support - * GODEBUG=http2server=0 # disable HTTP/2 server support - * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs - * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps - * ``` - * - * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - * - * The http package's [Transport] and [Server] both automatically enable - * HTTP/2 support for simple configurations. To enable HTTP/2 for more - * complex configurations, to use lower-level HTTP/2 features, or to use - * a newer version of Go's http2 package, import "golang.org/x/net/http2" - * directly and use its ConfigureTransport and/or ConfigureServer - * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 - * package takes precedence over the net/http package's built-in HTTP/2 - * support. - */ -namespace http { - /** - * A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an - * HTTP response or the Cookie header of an HTTP request. - * - * See https://tools.ietf.org/html/rfc6265 for details. - */ - interface Cookie { - name: string - value: string - quoted: boolean // indicates whether the Value was originally quoted - path: string // optional - domain: string // optional - expires: time.Time // optional - rawExpires: string // for reading cookies only - /** - * MaxAge=0 means no 'Max-Age' attribute specified. - * MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' - * MaxAge>0 means Max-Age attribute present and given in seconds - */ - maxAge: number - secure: boolean - httpOnly: boolean - sameSite: SameSite - partitioned: boolean - raw: string - unparsed: Array // Raw text of unparsed attribute-value pairs - } - interface Cookie { - /** - * String returns the serialization of the cookie for use in a [Cookie] - * header (if only Name and Value are set) or a Set-Cookie response - * header (if other fields are set). - * If c is nil or c.Name is invalid, the empty string is returned. - */ - string(): string - } - interface Cookie { - /** - * Valid reports whether the cookie is valid. - */ - valid(): void - } - // @ts-ignore - import mathrand = rand - /** - * A Header represents the key-value pairs in an HTTP header. - * - * The keys should be in canonical form, as returned by - * [CanonicalHeaderKey]. - */ - interface Header extends _TygojaDict{} - interface Header { - /** - * Add adds the key, value pair to the header. - * It appends to any existing values associated with key. 
- * The key is case insensitive; it is canonicalized by - * [CanonicalHeaderKey]. - */ - add(key: string, value: string): void - } - interface Header { - /** - * Set sets the header entries associated with key to the - * single element value. It replaces any existing values - * associated with key. The key is case insensitive; it is - * canonicalized by [textproto.CanonicalMIMEHeaderKey]. - * To use non-canonical keys, assign to the map directly. - */ - set(key: string, value: string): void - } - interface Header { - /** - * Get gets the first value associated with the given key. If - * there are no values associated with the key, Get returns "". - * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is - * used to canonicalize the provided key. Get assumes that all - * keys are stored in canonical form. To use non-canonical keys, - * access the map directly. - */ - get(key: string): string - } - interface Header { - /** - * Values returns all values associated with the given key. - * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is - * used to canonicalize the provided key. To use non-canonical - * keys, access the map directly. - * The returned slice is not a copy. - */ - values(key: string): Array - } - interface Header { - /** - * Del deletes the values associated with key. - * The key is case insensitive; it is canonicalized by - * [CanonicalHeaderKey]. - */ - del(key: string): void - } - interface Header { - /** - * Write writes a header in wire format. - */ - write(w: io.Writer): void - } - interface Header { - /** - * Clone returns a copy of h or nil if h is nil. - */ - clone(): Header - } - interface Header { - /** - * WriteSubset writes a header in wire format. - * If exclude is not nil, keys where exclude[key] == true are not written. - * Keys are not canonicalized before checking the exclude map. - */ - writeSubset(w: io.Writer, exclude: _TygojaDict): void - } - // @ts-ignore - import urlpkg = url - /** - * Response represents the response from an HTTP request. - * - * The [Client] and [Transport] return Responses from servers once - * the response headers have been received. The response body - * is streamed on demand as the Body field is read. - */ - interface Response { - status: string // e.g. "200 OK" - statusCode: number // e.g. 200 - proto: string // e.g. "HTTP/1.0" - protoMajor: number // e.g. 1 - protoMinor: number // e.g. 0 - /** - * Header maps header keys to values. If the response had multiple - * headers with the same key, they may be concatenated, with comma - * delimiters. (RFC 7230, section 3.2.2 requires that multiple headers - * be semantically equivalent to a comma-delimited sequence.) When - * Header values are duplicated by other fields in this struct (e.g., - * ContentLength, TransferEncoding, Trailer), the field values are - * authoritative. - * - * Keys in the map are canonicalized (see CanonicalHeaderKey). - */ - header: Header - /** - * Body represents the response body. - * - * The response body is streamed on demand as the Body field - * is read. If the network connection fails or the server - * terminates the response, Body.Read calls return an error. - * - * The http Client and Transport guarantee that Body is always - * non-nil, even on responses without a body or responses with - * a zero-length body. It is the caller's responsibility to - * close Body. The default HTTP client's Transport may not - * reuse HTTP/1.x "keep-alive" TCP connections if the Body is - * not read to completion and closed. 
- * - * The Body is automatically dechunked if the server replied - * with a "chunked" Transfer-Encoding. - * - * As of Go 1.12, the Body will also implement io.Writer - * on a successful "101 Switching Protocols" response, - * as used by WebSockets and HTTP/2's "h2c" mode. - */ - body: io.ReadCloser - /** - * ContentLength records the length of the associated content. The - * value -1 indicates that the length is unknown. Unless Request.Method - * is "HEAD", values >= 0 indicate that the given number of bytes may - * be read from Body. - */ - contentLength: number - /** - * Contains transfer encodings from outer-most to inner-most. Value is - * nil, means that "identity" encoding is used. - */ - transferEncoding: Array - /** - * Close records whether the header directed that the connection be - * closed after reading Body. The value is advice for clients: neither - * ReadResponse nor Response.Write ever closes a connection. - */ - close: boolean - /** - * Uncompressed reports whether the response was sent compressed but - * was decompressed by the http package. When true, reading from - * Body yields the uncompressed content instead of the compressed - * content actually set from the server, ContentLength is set to -1, - * and the "Content-Length" and "Content-Encoding" fields are deleted - * from the responseHeader. To get the original response from - * the server, set Transport.DisableCompression to true. - */ - uncompressed: boolean - /** - * Trailer maps trailer keys to values in the same - * format as Header. - * - * The Trailer initially contains only nil values, one for - * each key specified in the server's "Trailer" header - * value. Those values are not added to Header. - * - * Trailer must not be accessed concurrently with Read calls - * on the Body. - * - * After Body.Read has returned io.EOF, Trailer will contain - * any trailer values sent by the server. - */ - trailer: Header - /** - * Request is the request that was sent to obtain this Response. - * Request's Body is nil (having already been consumed). - * This is only populated for Client requests. - */ - request?: Request - /** - * TLS contains information about the TLS connection on which the - * response was received. It is nil for unencrypted responses. - * The pointer is shared between responses and should not be - * modified. - */ - tls?: any - } - interface Response { - /** - * Cookies parses and returns the cookies set in the Set-Cookie headers. - */ - cookies(): Array<(Cookie | undefined)> - } - interface Response { - /** - * Location returns the URL of the response's "Location" header, - * if present. Relative redirects are resolved relative to - * [Response.Request]. [ErrNoLocation] is returned if no - * Location header is present. - */ - location(): (url.URL) - } - interface Response { - /** - * ProtoAtLeast reports whether the HTTP protocol used - * in the response is at least major.minor. - */ - protoAtLeast(major: number, minor: number): boolean - } - interface Response { - /** - * Write writes r to w in the HTTP/1.x server response format, - * including the status line, headers, body, and optional trailer. - * - * This method consults the following fields of the response r: - * - * ``` - * StatusCode - * ProtoMajor - * ProtoMinor - * Request.Method - * TransferEncoding - * Trailer - * Body - * ContentLength - * Header, values for non-canonical keys will have unpredictable behavior - * ``` - * - * The Response Body is closed after it is sent. 
- */ - write(w: io.Writer): void - } -} - -namespace store { - /** - * Store defines a concurrent safe in memory key-value data store. - */ - interface Store { - } - interface Store { - /** - * Reset clears the store and replaces the store data with a - * shallow copy of the provided newData. - */ - reset(newData: _TygojaDict): void - } - interface Store { - /** - * Length returns the current number of elements in the store. - */ - length(): number - } - interface Store { - /** - * RemoveAll removes all the existing store entries. - */ - removeAll(): void - } - interface Store { - /** - * Remove removes a single entry from the store. - * - * Remove does nothing if key doesn't exist in the store. - */ - remove(key: string): void - } - interface Store { - /** - * Has checks if element with the specified key exist or not. - */ - has(key: string): boolean - } - interface Store { - /** - * Get returns a single element value from the store. - * - * If key is not set, the zero T value is returned. - */ - get(key: string): T - } - interface Store { - /** - * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not. - */ - getOk(key: string): [T, boolean] - } - interface Store { - /** - * GetAll returns a shallow copy of the current store data. - */ - getAll(): _TygojaDict - } - interface Store { - /** - * Values returns a slice with all of the current store values. - */ - values(): Array - } - interface Store { - /** - * Set sets (or overwrite if already exist) a new value for key. - */ - set(key: string, value: T): void - } - interface Store { - /** - * GetOrSet retrieves a single existing value for the provided key - * or stores a new one if it doesn't exist. - */ - getOrSet(key: string, setFunc: () => T): T - } - interface Store { - /** - * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. - * - * This method is similar to Set() but **it will skip adding new elements** - * to the store if the store length has reached the specified limit. - * false is returned if maxAllowedElements limit is reached. - */ - setIfLessThanLimit(key: string, value: T, maxAllowedElements: number): boolean - } - interface Store { - /** - * UnmarshalJSON implements [json.Unmarshaler] and imports the - * provided JSON data into the store. - * - * The store entries that match with the ones from the data will be overwritten with the new value. - */ - unmarshalJSON(data: string|Array): void - } - interface Store { - /** - * MarshalJSON implements [json.Marshaler] and export the current - * store data into valid JSON. - */ - marshalJSON(): string|Array - } -} - -/** - * Package cron implements a crontab-like service to execute and schedule - * repeative tasks/jobs. - * - * Example: - * - * ``` - * c := cron.New() - * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) - * c.Start() - * ``` - */ -namespace cron { - /** - * Cron is a crontab-like struct for tasks/jobs scheduling. - */ - interface Cron { - } - interface Cron { - /** - * SetInterval changes the current cron tick interval - * (it usually should be >= 1 minute). - */ - setInterval(d: time.Duration): void - } - interface Cron { - /** - * SetTimezone changes the current cron tick timezone. - */ - setTimezone(l: time.Location): void - } - interface Cron { - /** - * MustAdd is similar to Add() but panic on failure. - */ - mustAdd(jobId: string, cronExpr: string, run: () => void): void - } - interface Cron { - /** - * Add registers a single cron job. 
- * - * If there is already a job with the provided id, then the old job - * will be replaced with the new one. - * - * cronExpr is a regular cron expression, eg. "0 *\/3 * * *" (aka. at minute 0 past every 3rd hour). - * Check cron.NewSchedule() for the supported tokens. - */ - add(jobId: string, cronExpr: string, run: () => void): void - } - interface Cron { - /** - * Remove removes a single cron job by its id. - */ - remove(jobId: string): void - } - interface Cron { - /** - * RemoveAll removes all registered cron jobs. - */ - removeAll(): void - } - interface Cron { - /** - * Total returns the current total number of registered cron jobs. - */ - total(): number - } - interface Cron { - /** - * Stop stops the current cron ticker (if not already). - * - * You can resume the ticker by calling Start(). - */ - stop(): void - } - interface Cron { - /** - * Start starts the cron ticker. - * - * Calling Start() on already started cron will restart the ticker. - */ - start(): void - } - interface Cron { - /** - * HasStarted checks whether the current Cron ticker has been started. - */ - hasStarted(): boolean - } -} - -namespace hook { - /** - * Hook defines a generic concurrent safe structure for managing event hooks. - * - * When using custom a event it must embed the base [hook.Event]. - * - * Example: - * - * ``` - * type CustomEvent struct { - * hook.Event - * SomeField int - * } - * - * h := Hook[*CustomEvent]{} - * - * h.BindFunc(func(e *CustomEvent) error { - * println(e.SomeField) - * - * return e.Next() - * }) - * - * h.Trigger(&CustomEvent{ SomeField: 123 }) - * ``` - */ - interface Hook { - } - interface Hook { - /** - * Bind registers the provided handler to the current hooks queue. - * - * If handler.Id is empty it is updated with autogenerated value. - * - * If a handler from the current hook list has Id matching handler.Id - * then the old handler is replaced with the new one. - */ - bind(handler: Handler): string - } - interface Hook { - /** - * BindFunc is similar to Bind but registers a new handler from just the provided function. - * - * The registered handler is added with a default 0 priority and the id will be autogenerated. - * - * If you want to register a handler with custom priority or id use the [Hook.Bind] method. - */ - bindFunc(fn: HandlerFunc): string - } - interface Hook { - /** - * Unbind removes a single hook handler by its id. - */ - unbind(id: string): void - } - interface Hook { - /** - * UnbindAll removes all registered handlers. - */ - unbindAll(): void - } - interface Hook { - /** - * Length returns to total number of registered hook handlers. - */ - length(): number - } - interface Hook { - /** - * Trigger executes all registered hook handlers one by one - * with the specified event as an argument. - * - * Optionally, this method allows also to register additional one off - * handlers that will be temporary appended to the handlers queue. - * - * NB! Each hook handler must call event.Next() in order the hook chain to proceed. - */ - trigger(event: T, ...oneOffHandlers: HandlerFunc[]): void - } - /** - * TaggedHook defines a proxy hook which register handlers that are triggered only - * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). - */ - type _subPqnJU = mainHook - interface TaggedHook extends _subPqnJU { - } - interface TaggedHook { - /** - * CanTriggerOn checks if the current TaggedHook can be triggered with - * the provided event data tags. - * - * It returns always true if the hook doens't have any tags. 
- */ - canTriggerOn(tagsToCheck: Array): boolean - } - interface TaggedHook { - /** - * Bind registers the provided handler to the current hooks queue. - * - * It is similar to [Hook.Bind] with the difference that the handler - * function is invoked only if the event data tags satisfy h.CanTriggerOn. - */ - bind(handler: Handler): string - } - interface TaggedHook { - /** - * BindFunc registers a new handler with the specified function. - * - * It is similar to [Hook.Bind] with the difference that the handler - * function is invoked only if the event data tags satisfy h.CanTriggerOn. - */ - bindFunc(fn: HandlerFunc): string - } -} - /** * Package slog provides structured logging, * in which log records include a message, @@ -16193,17 +15457,733 @@ namespace slog { } } -namespace mailer { +/** + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. + * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. + */ +namespace sql { /** - * Mailer defines a base mail client interface. + * IsolationLevel is the transaction isolation level used in [TxOptions]. */ - interface Mailer { - [key:string]: any; + interface IsolationLevel extends Number{} + interface IsolationLevel { /** - * Send sends an email with the provided Message. + * String returns the name of the transaction isolation level. */ - send(message: Message): void + string(): string } + /** + * DBStats contains database statistics. + */ + interface DBStats { + maxOpenConnections: number // Maximum number of open connections to the database. + /** + * Pool Status + */ + openConnections: number // The number of established connections both in use and idle. + inUse: number // The number of connections currently in use. + idle: number // The number of idle connections. + /** + * Counters + */ + waitCount: number // The total number of connections waited for. + waitDuration: time.Duration // The total time blocked waiting for a new connection. + maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. + maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. + maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. + } + /** + * Conn represents a single database connection rather than a pool of database + * connections. Prefer running queries from [DB] unless there is a specific + * need for a continuous single database connection. + * + * A Conn must call [Conn.Close] to return the connection to the database pool + * and may do so concurrently with a running query. + * + * After a call to [Conn.Close], all operations on the + * connection fail with [ErrConnDone]. + */ + interface Conn { + } + interface Conn { + /** + * PingContext verifies the connection to the database is still alive. + */ + pingContext(ctx: context.Context): void + } + interface Conn { + /** + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Conn { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. 
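+ 	 *
+ 	 * For example (a minimal sketch; the table and column names are illustrative):
+ 	 *
+ 	 * ```
+ 	 * rows, err := conn.QueryContext(ctx, "SELECT id, name FROM users WHERE active = ?", true)
+ 	 * if err != nil {
+ 	 *     log.Fatal(err)
+ 	 * }
+ 	 * defer rows.Close()
+ 	 * for rows.Next() {
+ 	 *     var id int
+ 	 *     var name string
+ 	 *     if err := rows.Scan(&id, &name); err != nil {
+ 	 *         log.Fatal(err)
+ 	 *     }
+ 	 * }
+ 	 * if err := rows.Err(); err != nil {
+ 	 *     log.Fatal(err)
+ 	 * }
+ 	 * ```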
+ */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Conn { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * the [*Row.Scan] method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Conn { + /** + * PrepareContext creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Conn { + /** + * Raw executes f exposing the underlying driver connection for the + * duration of f. The driverConn must not be used outside of f. + * + * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable + * until [Conn.Close] is called. + */ + raw(f: (driverConn: any) => void): void + } + interface Conn { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface Conn { + /** + * Close returns the connection to the connection pool. + * All operations after a Close will return with [ErrConnDone]. + * Close is safe to call concurrently with other operations and will + * block until all other operations finish. It may be useful to first + * cancel any used context and then call close directly after. + */ + close(): void + } + /** + * ColumnType contains the name and type of a column. + */ + interface ColumnType { + } + interface ColumnType { + /** + * Name returns the name or alias of the column. + */ + name(): string + } + interface ColumnType { + /** + * Length returns the column type length for variable length column types such + * as text and binary field types. If the type length is unbounded the value will + * be [math.MaxInt64] (any database limits will still apply). + * If the column type is not variable length, such as an int, or if not supported + * by the driver ok is false. + */ + length(): [number, boolean] + } + interface ColumnType { + /** + * DecimalSize returns the scale and precision of a decimal type. + * If not applicable or if not supported ok is false. + */ + decimalSize(): [number, boolean] + } + interface ColumnType { + /** + * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. + * If a driver does not support this property ScanType will return + * the type of an empty interface. + */ + scanType(): any + } + interface ColumnType { + /** + * Nullable reports whether the column may be null. + * If a driver does not support this property ok will be false. 
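+ *
+ * A short sketch of how this is commonly read from an open *Rows result
+ * (rows here is an assumed variable, not part of this API):
+ *
+ * ```
+ * cols, err := rows.ColumnTypes()
+ * if err != nil {
+ *     log.Fatal(err)
+ * }
+ * for _, ct := range cols {
+ *     if nullable, ok := ct.Nullable(); ok {
+ *         fmt.Println(ct.Name(), "nullable:", nullable)
+ *     }
+ * }
+ * ```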
+ */ + nullable(): boolean + } + interface ColumnType { + /** + * DatabaseTypeName returns the database system name of the column type. If an empty + * string is returned, then the driver type name is not supported. + * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers + * are not included. + * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", + * "INT", and "BIGINT". + */ + databaseTypeName(): string + } + /** + * Row is the result of calling [DB.QueryRow] to select a single row. + */ + interface Row { + } + interface Row { + /** + * Scan copies the columns from the matched row into the values + * pointed at by dest. See the documentation on [Rows.Scan] for details. + * If more than one row matches the query, + * Scan uses the first row and discards the rest. If no row matches + * the query, Scan returns [ErrNoRows]. + */ + scan(...dest: any[]): void + } + interface Row { + /** + * Err provides a way for wrapping packages to check for + * query errors without calling [Row.Scan]. + * Err returns the error, if any, that was encountered while running the query. + * If this error is not nil, this error will also be returned from [Row.Scan]. + */ + err(): void + } +} + +/** + * Package http provides HTTP client and server implementations. + * + * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: + * + * ``` + * resp, err := http.Get("http://example.com/") + * ... + * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + * ... + * resp, err := http.PostForm("http://example.com/form", + * url.Values{"key": {"Value"}, "id": {"123"}}) + * ``` + * + * The caller must close the response body when finished with it: + * + * ``` + * resp, err := http.Get("http://example.com/") + * if err != nil { + * // handle error + * } + * defer resp.Body.Close() + * body, err := io.ReadAll(resp.Body) + * // ... + * ``` + * + * # Clients and Transports + * + * For control over HTTP client headers, redirect policy, and other + * settings, create a [Client]: + * + * ``` + * client := &http.Client{ + * CheckRedirect: redirectPolicyFunc, + * } + * + * resp, err := client.Get("http://example.com") + * // ... + * + * req, err := http.NewRequest("GET", "http://example.com", nil) + * // ... + * req.Header.Add("If-None-Match", `W/"wyzzy"`) + * resp, err := client.Do(req) + * // ... + * ``` + * + * For control over proxies, TLS configuration, keep-alives, + * compression, and other settings, create a [Transport]: + * + * ``` + * tr := &http.Transport{ + * MaxIdleConns: 10, + * IdleConnTimeout: 30 * time.Second, + * DisableCompression: true, + * } + * client := &http.Client{Transport: tr} + * resp, err := client.Get("https://example.com") + * ``` + * + * Clients and Transports are safe for concurrent use by multiple + * goroutines and for efficiency should only be created once and re-used. + * + * # Servers + * + * ListenAndServe starts an HTTP server with a given address and handler. + * The handler is usually nil, which means to use [DefaultServeMux]. 
+ * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: + * + * ``` + * http.Handle("/foo", fooHandler) + * + * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + * }) + * + * log.Fatal(http.ListenAndServe(":8080", nil)) + * ``` + * + * More control over the server's behavior is available by creating a + * custom Server: + * + * ``` + * s := &http.Server{ + * Addr: ":8080", + * Handler: myHandler, + * ReadTimeout: 10 * time.Second, + * WriteTimeout: 10 * time.Second, + * MaxHeaderBytes: 1 << 20, + * } + * log.Fatal(s.ListenAndServe()) + * ``` + * + * # HTTP/2 + * + * Starting with Go 1.6, the http package has transparent support for the + * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 + * can do so by setting [Transport.TLSNextProto] (for clients) or + * [Server.TLSNextProto] (for servers) to a non-nil, empty + * map. Alternatively, the following GODEBUG settings are + * currently supported: + * + * ``` + * GODEBUG=http2client=0 # disable HTTP/2 client support + * GODEBUG=http2server=0 # disable HTTP/2 server support + * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs + * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps + * ``` + * + * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug + * + * The http package's [Transport] and [Server] both automatically enable + * HTTP/2 support for simple configurations. To enable HTTP/2 for more + * complex configurations, to use lower-level HTTP/2 features, or to use + * a newer version of Go's http2 package, import "golang.org/x/net/http2" + * directly and use its ConfigureTransport and/or ConfigureServer + * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 + * package takes precedence over the net/http package's built-in HTTP/2 + * support. + */ +namespace http { + /** + * A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an + * HTTP response or the Cookie header of an HTTP request. + * + * See https://tools.ietf.org/html/rfc6265 for details. + */ + interface Cookie { + name: string + value: string + quoted: boolean // indicates whether the Value was originally quoted + path: string // optional + domain: string // optional + expires: time.Time // optional + rawExpires: string // for reading cookies only + /** + * MaxAge=0 means no 'Max-Age' attribute specified. + * MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + * MaxAge>0 means Max-Age attribute present and given in seconds + */ + maxAge: number + secure: boolean + httpOnly: boolean + sameSite: SameSite + partitioned: boolean + raw: string + unparsed: Array // Raw text of unparsed attribute-value pairs + } + interface Cookie { + /** + * String returns the serialization of the cookie for use in a [Cookie] + * header (if only Name and Value are set) or a Set-Cookie response + * header (if other fields are set). + * If c is nil or c.Name is invalid, the empty string is returned. + */ + string(): string + } + interface Cookie { + /** + * Valid reports whether the cookie is valid. + */ + valid(): void + } + // @ts-ignore + import mathrand = rand + /** + * A Header represents the key-value pairs in an HTTP header. + * + * The keys should be in canonical form, as returned by + * [CanonicalHeaderKey]. + */ + interface Header extends _TygojaDict{} + interface Header { + /** + * Add adds the key, value pair to the header. + * It appends to any existing values associated with key. 
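+ *
+ * For example (a small sketch):
+ *
+ * ```
+ * h := http.Header{}
+ * h.Add("Accept", "text/html")
+ * h.Add("Accept", "application/json")
+ * // h.Values("Accept") now returns both values, in insertion order
+ * ```
+ *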
+ * The key is case insensitive; it is canonicalized by + * [CanonicalHeaderKey]. + */ + add(key: string, value: string): void + } + interface Header { + /** + * Set sets the header entries associated with key to the + * single element value. It replaces any existing values + * associated with key. The key is case insensitive; it is + * canonicalized by [textproto.CanonicalMIMEHeaderKey]. + * To use non-canonical keys, assign to the map directly. + */ + set(key: string, value: string): void + } + interface Header { + /** + * Get gets the first value associated with the given key. If + * there are no values associated with the key, Get returns "". + * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is + * used to canonicalize the provided key. Get assumes that all + * keys are stored in canonical form. To use non-canonical keys, + * access the map directly. + */ + get(key: string): string + } + interface Header { + /** + * Values returns all values associated with the given key. + * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is + * used to canonicalize the provided key. To use non-canonical + * keys, access the map directly. + * The returned slice is not a copy. + */ + values(key: string): Array + } + interface Header { + /** + * Del deletes the values associated with key. + * The key is case insensitive; it is canonicalized by + * [CanonicalHeaderKey]. + */ + del(key: string): void + } + interface Header { + /** + * Write writes a header in wire format. + */ + write(w: io.Writer): void + } + interface Header { + /** + * Clone returns a copy of h or nil if h is nil. + */ + clone(): Header + } + interface Header { + /** + * WriteSubset writes a header in wire format. + * If exclude is not nil, keys where exclude[key] == true are not written. + * Keys are not canonicalized before checking the exclude map. + */ + writeSubset(w: io.Writer, exclude: _TygojaDict): void + } + // @ts-ignore + import urlpkg = url + /** + * Response represents the response from an HTTP request. + * + * The [Client] and [Transport] return Responses from servers once + * the response headers have been received. The response body + * is streamed on demand as the Body field is read. + */ + interface Response { + status: string // e.g. "200 OK" + statusCode: number // e.g. 200 + proto: string // e.g. "HTTP/1.0" + protoMajor: number // e.g. 1 + protoMinor: number // e.g. 0 + /** + * Header maps header keys to values. If the response had multiple + * headers with the same key, they may be concatenated, with comma + * delimiters. (RFC 7230, section 3.2.2 requires that multiple headers + * be semantically equivalent to a comma-delimited sequence.) When + * Header values are duplicated by other fields in this struct (e.g., + * ContentLength, TransferEncoding, Trailer), the field values are + * authoritative. + * + * Keys in the map are canonicalized (see CanonicalHeaderKey). + */ + header: Header + /** + * Body represents the response body. + * + * The response body is streamed on demand as the Body field + * is read. If the network connection fails or the server + * terminates the response, Body.Read calls return an error. + * + * The http Client and Transport guarantee that Body is always + * non-nil, even on responses without a body or responses with + * a zero-length body. It is the caller's responsibility to + * close Body. The default HTTP client's Transport may not + * reuse HTTP/1.x "keep-alive" TCP connections if the Body is + * not read to completion and closed. 
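+ *
+ * A common pattern to keep the connection reusable is to drain and then
+ * close the body (a sketch; proper error handling is omitted):
+ *
+ * ```
+ * defer resp.Body.Close()
+ * _, _ = io.Copy(io.Discard, resp.Body)
+ * ```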
+ * + * The Body is automatically dechunked if the server replied + * with a "chunked" Transfer-Encoding. + * + * As of Go 1.12, the Body will also implement io.Writer + * on a successful "101 Switching Protocols" response, + * as used by WebSockets and HTTP/2's "h2c" mode. + */ + body: io.ReadCloser + /** + * ContentLength records the length of the associated content. The + * value -1 indicates that the length is unknown. Unless Request.Method + * is "HEAD", values >= 0 indicate that the given number of bytes may + * be read from Body. + */ + contentLength: number + /** + * Contains transfer encodings from outer-most to inner-most. Value is + * nil, means that "identity" encoding is used. + */ + transferEncoding: Array + /** + * Close records whether the header directed that the connection be + * closed after reading Body. The value is advice for clients: neither + * ReadResponse nor Response.Write ever closes a connection. + */ + close: boolean + /** + * Uncompressed reports whether the response was sent compressed but + * was decompressed by the http package. When true, reading from + * Body yields the uncompressed content instead of the compressed + * content actually set from the server, ContentLength is set to -1, + * and the "Content-Length" and "Content-Encoding" fields are deleted + * from the responseHeader. To get the original response from + * the server, set Transport.DisableCompression to true. + */ + uncompressed: boolean + /** + * Trailer maps trailer keys to values in the same + * format as Header. + * + * The Trailer initially contains only nil values, one for + * each key specified in the server's "Trailer" header + * value. Those values are not added to Header. + * + * Trailer must not be accessed concurrently with Read calls + * on the Body. + * + * After Body.Read has returned io.EOF, Trailer will contain + * any trailer values sent by the server. + */ + trailer: Header + /** + * Request is the request that was sent to obtain this Response. + * Request's Body is nil (having already been consumed). + * This is only populated for Client requests. + */ + request?: Request + /** + * TLS contains information about the TLS connection on which the + * response was received. It is nil for unencrypted responses. + * The pointer is shared between responses and should not be + * modified. + */ + tls?: any + } + interface Response { + /** + * Cookies parses and returns the cookies set in the Set-Cookie headers. + */ + cookies(): Array<(Cookie | undefined)> + } + interface Response { + /** + * Location returns the URL of the response's "Location" header, + * if present. Relative redirects are resolved relative to + * [Response.Request]. [ErrNoLocation] is returned if no + * Location header is present. + */ + location(): (url.URL) + } + interface Response { + /** + * ProtoAtLeast reports whether the HTTP protocol used + * in the response is at least major.minor. + */ + protoAtLeast(major: number, minor: number): boolean + } + interface Response { + /** + * Write writes r to w in the HTTP/1.x server response format, + * including the status line, headers, body, and optional trailer. + * + * This method consults the following fields of the response r: + * + * ``` + * StatusCode + * ProtoMajor + * ProtoMinor + * Request.Method + * TransferEncoding + * Trailer + * Body + * ContentLength + * Header, values for non-canonical keys will have unpredictable behavior + * ``` + * + * The Response Body is closed after it is sent. 
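+ *
+ * A minimal usage sketch (the request URL is an illustrative assumption):
+ *
+ * ```
+ * resp, err := http.Get("http://example.com/")
+ * if err != nil {
+ *     log.Fatal(err)
+ * }
+ * var buf bytes.Buffer
+ * if err := resp.Write(&buf); err != nil {
+ *     log.Fatal(err)
+ * }
+ * // buf now holds the response in HTTP/1.x wire format and the body has
+ * // been consumed and closed by Write.
+ * ```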
+ */ + write(w: io.Writer): void + } +} + +namespace hook { + /** + * Hook defines a generic concurrent safe structure for managing event hooks. + * + * When using custom a event it must embed the base [hook.Event]. + * + * Example: + * + * ``` + * type CustomEvent struct { + * hook.Event + * SomeField int + * } + * + * h := Hook[*CustomEvent]{} + * + * h.BindFunc(func(e *CustomEvent) error { + * println(e.SomeField) + * + * return e.Next() + * }) + * + * h.Trigger(&CustomEvent{ SomeField: 123 }) + * ``` + */ + interface Hook { + } + interface Hook { + /** + * Bind registers the provided handler to the current hooks queue. + * + * If handler.Id is empty it is updated with autogenerated value. + * + * If a handler from the current hook list has Id matching handler.Id + * then the old handler is replaced with the new one. + */ + bind(handler: Handler): string + } + interface Hook { + /** + * BindFunc is similar to Bind but registers a new handler from just the provided function. + * + * The registered handler is added with a default 0 priority and the id will be autogenerated. + * + * If you want to register a handler with custom priority or id use the [Hook.Bind] method. + */ + bindFunc(fn: HandlerFunc): string + } + interface Hook { + /** + * Unbind removes a single hook handler by its id. + */ + unbind(id: string): void + } + interface Hook { + /** + * UnbindAll removes all registered handlers. + */ + unbindAll(): void + } + interface Hook { + /** + * Length returns to total number of registered hook handlers. + */ + length(): number + } + interface Hook { + /** + * Trigger executes all registered hook handlers one by one + * with the specified event as an argument. + * + * Optionally, this method allows also to register additional one off + * handlers that will be temporary appended to the handlers queue. + * + * NB! Each hook handler must call event.Next() in order the hook chain to proceed. + */ + trigger(event: T, ...oneOffHandlers: HandlerFunc[]): void + } + /** + * TaggedHook defines a proxy hook which register handlers that are triggered only + * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). + */ + type _subJOsAo = mainHook + interface TaggedHook extends _subJOsAo { + } + interface TaggedHook { + /** + * CanTriggerOn checks if the current TaggedHook can be triggered with + * the provided event data tags. + * + * It returns always true if the hook doens't have any tags. + */ + canTriggerOn(tagsToCheck: Array): boolean + } + interface TaggedHook { + /** + * Bind registers the provided handler to the current hooks queue. + * + * It is similar to [Hook.Bind] with the difference that the handler + * function is invoked only if the event data tags satisfy h.CanTriggerOn. + */ + bind(handler: Handler): string + } + interface TaggedHook { + /** + * BindFunc registers a new handler with the specified function. + * + * It is similar to [Hook.Bind] with the difference that the handler + * function is invoked only if the event data tags satisfy h.CanTriggerOn. + */ + bindFunc(fn: HandlerFunc): string + } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { } namespace router { @@ -16215,8 +16195,8 @@ namespace router { * * NB! It is expected that the Response and Request fields are always set. 
*/ - type _subPBHhi = hook.Event - interface Event extends _subPBHhi { + type _subFwDcZ = hook.Event + interface Event extends _subFwDcZ { response: http.ResponseWriter request?: http.Request } @@ -16284,6 +16264,13 @@ namespace router { */ unsafeRealIP(): string } + interface Event { + /** + * FindUploadedFiles extracts all form files of "key" from a http request + * and returns a slice with filesystem.File instances (if any). + */ + findUploadedFiles(key: string): Array<(filesystem.File | undefined)> + } interface Event { /** * Get retrieves single value from the current event data store. @@ -16521,46 +16508,107 @@ namespace router { } } -namespace subscriptions { +/** + * Package cron implements a crontab-like service to execute and schedule + * repeative tasks/jobs. + * + * Example: + * + * ``` + * c := cron.New() + * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) + * c.Start() + * ``` + */ +namespace cron { /** - * Broker defines a struct for managing subscriptions clients. + * Cron is a crontab-like struct for tasks/jobs scheduling. */ - interface Broker { + interface Cron { } - interface Broker { + interface Cron { /** - * Clients returns a shallow copy of all registered clients indexed - * with their connection id. + * SetInterval changes the current cron tick interval + * (it usually should be >= 1 minute). */ - clients(): _TygojaDict + setInterval(d: time.Duration): void } - interface Broker { + interface Cron { /** - * ChunkedClients splits the current clients into a chunked slice. + * SetTimezone changes the current cron tick timezone. */ - chunkedClients(chunkSize: number): Array> + setTimezone(l: time.Location): void } - interface Broker { + interface Cron { /** - * ClientById finds a registered client by its id. + * MustAdd is similar to Add() but panic on failure. + */ + mustAdd(jobId: string, cronExpr: string, run: () => void): void + } + interface Cron { + /** + * Add registers a single cron job. * - * Returns non-nil error when client with clientId is not registered. - */ - clientById(clientId: string): Client - } - interface Broker { - /** - * Register adds a new client to the broker instance. - */ - register(client: Client): void - } - interface Broker { - /** - * Unregister removes a single client by its id. + * If there is already a job with the provided id, then the old job + * will be replaced with the new one. * - * If client with clientId doesn't exist, this method does nothing. + * cronExpr is a regular cron expression, eg. "0 *\/3 * * *" (aka. at minute 0 past every 3rd hour). + * Check cron.NewSchedule() for the supported tokens. */ - unregister(clientId: string): void + add(jobId: string, cronExpr: string, run: () => void): void + } + interface Cron { + /** + * Remove removes a single cron job by its id. + */ + remove(jobId: string): void + } + interface Cron { + /** + * RemoveAll removes all registered cron jobs. + */ + removeAll(): void + } + interface Cron { + /** + * Total returns the current total number of registered cron jobs. + */ + total(): number + } + interface Cron { + /** + * Stop stops the current cron ticker (if not already). + * + * You can resume the ticker by calling Start(). + */ + stop(): void + } + interface Cron { + /** + * Start starts the cron ticker. + * + * Calling Start() on already started cron will restart the ticker. + */ + start(): void + } + interface Cron { + /** + * HasStarted checks whether the current Cron ticker has been started. 
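+ *
+ * For example, the following guard (a sketch, assuming c was created with
+ * cron.New() as in the package example) starts the ticker only when it is
+ * not already running:
+ *
+ * ```
+ * if !c.HasStarted() {
+ *     c.Start()
+ * }
+ * ```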
+ */ + hasStarted(): boolean + } +} + +namespace mailer { + /** + * Mailer defines a base mail client interface. + */ + interface Mailer { + [key:string]: any; + /** + * Send sends an email with the provided Message. + */ + send(message: Message): void } } @@ -16575,8 +16623,8 @@ namespace core { /** * AuthOrigin defines a Record proxy for working with the authOrigins collection. */ - type _subkIVie = Record - interface AuthOrigin extends _subkIVie { + type _subbuYEL = Record + interface AuthOrigin extends _subbuYEL { } interface AuthOrigin { /** @@ -16648,8 +16696,8 @@ namespace core { /** * Collection defines the table, fields and various options related to a set of records. */ - type _subDkqXF = baseCollection&collectionAuthOptions&collectionViewOptions - interface Collection extends _subDkqXF { + type _subfvorl = baseCollection&collectionAuthOptions&collectionViewOptions + interface Collection extends _subfvorl { } interface Collection { /** @@ -16847,58 +16895,58 @@ namespace core { */ clone(): (RequestInfo) } - type _subBNlNh = RequestEvent - interface BatchRequestEvent extends _subBNlNh { + type _subtbgSM = RequestEvent + interface BatchRequestEvent extends _subtbgSM { batch: Array<(InternalRequest | undefined)> } - type _subnnRAX = hook.Event - interface BootstrapEvent extends _subnnRAX { + type _subHQLJR = hook.Event + interface BootstrapEvent extends _subHQLJR { app: App } - type _subSVUUq = hook.Event - interface TerminateEvent extends _subSVUUq { + type _subCjwpw = hook.Event + interface TerminateEvent extends _subCjwpw { app: App isRestart: boolean } - type _subzSRiv = hook.Event - interface BackupEvent extends _subzSRiv { + type _subnoQer = hook.Event + interface BackupEvent extends _subnoQer { app: App context: context.Context name: string // the name of the backup to create/restore. exclude: Array // list of dir entries to exclude from the backup create/restore. 
} - type _subGNCOc = hook.Event - interface ServeEvent extends _subGNCOc { + type _subwGaFh = hook.Event + interface ServeEvent extends _subwGaFh { app: App router?: router.Router server?: http.Server certManager?: any } - type _suboOzgW = hook.Event&RequestEvent - interface SettingsListRequestEvent extends _suboOzgW { + type _subYiaKT = hook.Event&RequestEvent + interface SettingsListRequestEvent extends _subYiaKT { settings?: Settings } - type _subuSRkZ = hook.Event&RequestEvent - interface SettingsUpdateRequestEvent extends _subuSRkZ { + type _subgaqNM = hook.Event&RequestEvent + interface SettingsUpdateRequestEvent extends _subgaqNM { oldSettings?: Settings newSettings?: Settings } - type _subPmEmB = hook.Event - interface SettingsReloadEvent extends _subPmEmB { + type _subTEbVL = hook.Event + interface SettingsReloadEvent extends _subTEbVL { app: App } - type _subbEHXk = hook.Event - interface MailerEvent extends _subbEHXk { + type _subPVHTD = hook.Event + interface MailerEvent extends _subPVHTD { app: App mailer: mailer.Mailer message?: mailer.Message } - type _subpCTxH = MailerEvent&baseRecordEventData - interface MailerRecordEvent extends _subpCTxH { + type _subwdqNe = MailerEvent&baseRecordEventData + interface MailerRecordEvent extends _subwdqNe { meta: _TygojaDict } - type _subDpIRL = hook.Event&baseModelEventData - interface ModelEvent extends _subDpIRL { + type _subjHCuu = hook.Event&baseModelEventData + interface ModelEvent extends _subjHCuu { app: App context: context.Context /** @@ -16910,12 +16958,12 @@ namespace core { */ type: string } - type _subaEMaJ = ModelEvent - interface ModelErrorEvent extends _subaEMaJ { + type _subkZVDa = ModelEvent + interface ModelErrorEvent extends _subkZVDa { error: Error } - type _subKLiEq = hook.Event&baseRecordEventData - interface RecordEvent extends _subKLiEq { + type _subNYMRV = hook.Event&baseRecordEventData + interface RecordEvent extends _subNYMRV { app: App context: context.Context /** @@ -16927,12 +16975,12 @@ namespace core { */ type: string } - type _subiGyZm = RecordEvent - interface RecordErrorEvent extends _subiGyZm { + type _subSyPPx = RecordEvent + interface RecordErrorEvent extends _subSyPPx { error: Error } - type _subEFDdz = hook.Event&baseCollectionEventData - interface CollectionEvent extends _subEFDdz { + type _subykCtB = hook.Event&baseCollectionEventData + interface CollectionEvent extends _subykCtB { app: App context: context.Context /** @@ -16944,95 +16992,95 @@ namespace core { */ type: string } - type _subSmHLD = CollectionEvent - interface CollectionErrorEvent extends _subSmHLD { + type _subSlWNa = CollectionEvent + interface CollectionErrorEvent extends _subSlWNa { error: Error } - type _subbQXBO = hook.Event&RequestEvent - interface FileTokenRequestEvent extends _subbQXBO { + type _suboaRFE = hook.Event&RequestEvent + interface FileTokenRequestEvent extends _suboaRFE { token: string } - type _subYMuec = hook.Event&RequestEvent&baseCollectionEventData - interface FileDownloadRequestEvent extends _subYMuec { + type _subxikgC = hook.Event&RequestEvent&baseCollectionEventData + interface FileDownloadRequestEvent extends _subxikgC { record?: Record fileField?: FileField servedPath: string servedName: string } - type _subnnRQj = hook.Event&RequestEvent - interface CollectionsListRequestEvent extends _subnnRQj { + type _subBttwy = hook.Event&RequestEvent + interface CollectionsListRequestEvent extends _subBttwy { collections: Array<(Collection | undefined)> result?: search.Result } - type _subnDfhN = 
hook.Event&RequestEvent - interface CollectionsImportRequestEvent extends _subnDfhN { + type _subsYWuC = hook.Event&RequestEvent + interface CollectionsImportRequestEvent extends _subsYWuC { collectionsData: Array<_TygojaDict> deleteMissing: boolean } - type _submlAhx = hook.Event&RequestEvent&baseCollectionEventData - interface CollectionRequestEvent extends _submlAhx { + type _subhRLLu = hook.Event&RequestEvent&baseCollectionEventData + interface CollectionRequestEvent extends _subhRLLu { } - type _subxHHlk = hook.Event&RequestEvent - interface RealtimeConnectRequestEvent extends _subxHHlk { + type _subwyXDb = hook.Event&RequestEvent + interface RealtimeConnectRequestEvent extends _subwyXDb { client: subscriptions.Client /** * note: modifying it after the connect has no effect */ idleTimeout: time.Duration } - type _subtuMXT = hook.Event&RequestEvent - interface RealtimeMessageEvent extends _subtuMXT { + type _subEzoAV = hook.Event&RequestEvent + interface RealtimeMessageEvent extends _subEzoAV { client: subscriptions.Client message?: subscriptions.Message } - type _subPSWBK = hook.Event&RequestEvent - interface RealtimeSubscribeRequestEvent extends _subPSWBK { + type _subCejlO = hook.Event&RequestEvent + interface RealtimeSubscribeRequestEvent extends _subCejlO { client: subscriptions.Client subscriptions: Array } - type _subJKFgr = hook.Event&RequestEvent&baseCollectionEventData - interface RecordsListRequestEvent extends _subJKFgr { + type _subEsLns = hook.Event&RequestEvent&baseCollectionEventData + interface RecordsListRequestEvent extends _subEsLns { /** * @todo consider removing and maybe add as generic to the search.Result? */ records: Array<(Record | undefined)> result?: search.Result } - type _subXfSAT = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEvent extends _subXfSAT { + type _subjWftN = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEvent extends _subjWftN { record?: Record } - type _subgZfBm = hook.Event&baseRecordEventData - interface RecordEnrichEvent extends _subgZfBm { + type _subwPuiB = hook.Event&baseRecordEventData + interface RecordEnrichEvent extends _subwPuiB { app: App requestInfo?: RequestInfo } - type _subTYVTo = hook.Event&RequestEvent&baseCollectionEventData - interface RecordCreateOTPRequestEvent extends _subTYVTo { + type _subISYEk = hook.Event&RequestEvent&baseCollectionEventData + interface RecordCreateOTPRequestEvent extends _subISYEk { record?: Record password: string } - type _subJyJES = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOTPRequestEvent extends _subJyJES { + type _subwqbvQ = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOTPRequestEvent extends _subwqbvQ { record?: Record otp?: OTP } - type _submpLNF = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRequestEvent extends _submpLNF { + type _subjpNwz = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRequestEvent extends _subjpNwz { record?: Record token: string meta: any authMethod: string } - type _subMXzKa = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithPasswordRequestEvent extends _subMXzKa { + type _subxprkc = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithPasswordRequestEvent extends _subxprkc { record?: Record identity: string identityField: string password: string } - type _substJLq = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOAuth2RequestEvent 
extends _substJLq { + type _subMegrY = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOAuth2RequestEvent extends _subMegrY { providerName: string providerClient: auth.Provider record?: Record @@ -17040,41 +17088,41 @@ namespace core { createData: _TygojaDict isNewRecord: boolean } - type _submURIz = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRefreshRequestEvent extends _submURIz { + type _subQaYaG = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRefreshRequestEvent extends _subQaYaG { record?: Record } - type _subcKIan = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestPasswordResetRequestEvent extends _subcKIan { + type _subahlDh = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestPasswordResetRequestEvent extends _subahlDh { record?: Record } - type _subRMqVJ = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmPasswordResetRequestEvent extends _subRMqVJ { + type _subjnFfi = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmPasswordResetRequestEvent extends _subjnFfi { record?: Record } - type _suboblGi = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestVerificationRequestEvent extends _suboblGi { + type _subKpyCG = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestVerificationRequestEvent extends _subKpyCG { record?: Record } - type _subvTLkV = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmVerificationRequestEvent extends _subvTLkV { + type _subkgxfN = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmVerificationRequestEvent extends _subkgxfN { record?: Record } - type _subYFlNg = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEmailChangeRequestEvent extends _subYFlNg { + type _subSeUJu = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEmailChangeRequestEvent extends _subSeUJu { record?: Record newEmail: string } - type _subntuOr = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmEmailChangeRequestEvent extends _subntuOr { + type _subkItzq = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmEmailChangeRequestEvent extends _subkItzq { record?: Record newEmail: string } /** * ExternalAuth defines a Record proxy for working with the externalAuths collection. */ - type _subZhRGO = Record - interface ExternalAuth extends _subZhRGO { + type _subRmYlG = Record + interface ExternalAuth extends _subRmYlG { } interface ExternalAuth { /** @@ -17514,8 +17562,8 @@ namespace core { */ scan(value: any): void } - type _subrQOrz = BaseModel - interface Log extends _subrQOrz { + type _sublrYPN = BaseModel + interface Log extends _sublrYPN { created: types.DateTime data: types.JSONMap message: string @@ -17534,8 +17582,8 @@ namespace core { /** * MFA defines a Record proxy for working with the mfas collection. */ - type _subOOyQG = Record - interface MFA extends _subOOyQG { + type _subqoUfC = Record + interface MFA extends _subqoUfC { } interface MFA { /** @@ -17614,8 +17662,8 @@ namespace core { /** * OTP defines a Record proxy for working with the otps collection. */ - type _submjxxq = Record - interface OTP extends _submjxxq { + type _subWdxDp = Record + interface OTP extends _subWdxDp { } interface OTP { /** @@ -17686,8 +17734,8 @@ namespace core { /** * Settings defines the PocketBase app settings. 
*/ - type _subIVrQW = settings - interface Settings extends _subIVrQW { + type _subgsoVm = settings + interface Settings extends _subgsoVm { } interface Settings { /** @@ -17772,55 +17820,6 @@ namespace core { } } -/** - * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. - * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. - */ -namespace cobra { - interface PositionalArgs {(cmd: Command, args: Array): void } - // @ts-ignore - import flag = pflag - /** - * FParseErrWhitelist configures Flag parse errors to be ignored - */ - interface FParseErrWhitelist extends _TygojaAny{} - /** - * Group Structure to manage groups for commands - */ - interface Group { - id: string - title: string - } - /** - * ShellCompDirective is a bit map representing the different behaviors the shell - * can be instructed to have once completions have been provided. - */ - interface ShellCompDirective extends Number{} - /** - * CompletionOptions are the options to control shell completion - */ - interface CompletionOptions { - /** - * DisableDefaultCmd prevents Cobra from creating a default 'completion' command - */ - disableDefaultCmd: boolean - /** - * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - * for shells that support completion descriptions - */ - disableNoDescFlag: boolean - /** - * DisableDescriptions turns off all completion descriptions for shells - * that support them - */ - disableDescriptions: boolean - /** - * HiddenDefaultCmd makes the default 'completion' command hidden - */ - hiddenDefaultCmd: boolean - } -} - /** * Package url parses URLs and implements query escaping. */ @@ -17854,54 +17853,6 @@ namespace url { } } -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { - /** - * NullString represents a string that may be null. - * NullString implements the [Scanner] interface so - * it can be used as a scan destination: - * - * ``` - * var s NullString - * err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s) - * ... - * if s.Valid { - * // use s.String - * } else { - * // NULL value - * } - * ``` - */ - interface NullString { - string: string - valid: boolean // Valid is true if String is not NULL - } - interface NullString { - /** - * Scan implements the [Scanner] interface. - */ - scan(value: any): void - } - interface NullString { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } -} - /** * Package multipart implements MIME multipart parsing, as defined in RFC * 2046. @@ -18336,6 +18287,54 @@ namespace http { } } +/** + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. + * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. + */ +namespace sql { + /** + * NullString represents a string that may be null. 
+ * NullString implements the [Scanner] interface so + * it can be used as a scan destination: + * + * ``` + * var s NullString + * err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s) + * ... + * if s.Valid { + * // use s.String + * } else { + * // NULL value + * } + * ``` + */ + interface NullString { + string: string + valid: boolean // Valid is true if String is not NULL + } + interface NullString { + /** + * Scan implements the [Scanner] interface. + */ + scan(value: any): void + } + interface NullString { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } +} + namespace store { } @@ -18393,176 +18392,16 @@ namespace types { } } -namespace auth { +namespace search { /** - * Provider defines a common interface for an OAuth2 client. + * Result defines the returned search result structure. */ - interface Provider { - [key:string]: any; - /** - * Context returns the context associated with the provider (if any). - */ - context(): context.Context - /** - * SetContext assigns the specified context to the current provider. - */ - setContext(ctx: context.Context): void - /** - * PKCE indicates whether the provider can use the PKCE flow. - */ - pkce(): boolean - /** - * SetPKCE toggles the state whether the provider can use the PKCE flow or not. - */ - setPKCE(enable: boolean): void - /** - * DisplayName usually returns provider name as it is officially written - * and it could be used directly in the UI. - */ - displayName(): string - /** - * SetDisplayName sets the provider's display name. - */ - setDisplayName(displayName: string): void - /** - * Scopes returns the provider access permissions that will be requested. - */ - scopes(): Array - /** - * SetScopes sets the provider access permissions that will be requested later. - */ - setScopes(scopes: Array): void - /** - * ClientId returns the provider client's app ID. - */ - clientId(): string - /** - * SetClientId sets the provider client's ID. - */ - setClientId(clientId: string): void - /** - * ClientSecret returns the provider client's app secret. - */ - clientSecret(): string - /** - * SetClientSecret sets the provider client's app secret. - */ - setClientSecret(secret: string): void - /** - * RedirectURL returns the end address to redirect the user - * going through the OAuth flow. - */ - redirectURL(): string - /** - * SetRedirectURL sets the provider's RedirectURL. - */ - setRedirectURL(url: string): void - /** - * AuthURL returns the provider's authorization service url. - */ - authURL(): string - /** - * SetAuthURL sets the provider's AuthURL. - */ - setAuthURL(url: string): void - /** - * TokenURL returns the provider's token exchange service url. - */ - tokenURL(): string - /** - * SetTokenURL sets the provider's TokenURL. - */ - setTokenURL(url: string): void - /** - * UserInfoURL returns the provider's user info api url. - */ - userInfoURL(): string - /** - * SetUserInfoURL sets the provider's UserInfoURL. - */ - setUserInfoURL(url: string): void - /** - * Client returns an http client using the provided token. - */ - client(token: oauth2.Token): (any) - /** - * BuildAuthURL returns a URL to the provider's consent page - * that asks for permissions for the required scopes explicitly. - */ - buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string - /** - * FetchToken converts an authorization code to token. 
- */ - fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token) - /** - * FetchRawUserInfo requests and marshalizes into `result` the - * the OAuth user api response. - */ - fetchRawUserInfo(token: oauth2.Token): string|Array - /** - * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and - * marshalizes the user api response into a standardized AuthUser struct. - */ - fetchAuthUser(token: oauth2.Token): (AuthUser) - } - /** - * AuthUser defines a standardized OAuth2 user data structure. - */ - interface AuthUser { - expiry: types.DateTime - rawUser: _TygojaDict - id: string - name: string - username: string - email: string - avatarURL: string - accessToken: string - refreshToken: string - /** - * @todo - * deprecated: use AvatarURL instead - * AvatarUrl will be removed after dropping v0.22 support - */ - avatarUrl: string - } - interface AuthUser { - /** - * MarshalJSON implements the [json.Marshaler] interface. - * - * @todo remove after dropping v0.22 support - */ - marshalJSON(): string|Array - } -} - -namespace hook { - /** - * Event implements [Resolver] and it is intended to be used as a base - * Hook event that you can embed in your custom typed event structs. - * - * Example: - * - * ``` - * type CustomEvent struct { - * hook.Event - * - * SomeField int - * } - * ``` - */ - interface Event { - } - interface Event { - /** - * Next calls the next hook handler. - */ - next(): void - } - /** - * wrapped local Hook embedded struct to limit the public API surface. - */ - type _subopNqc = Hook - interface mainHook extends _subopNqc { + interface Result { + items: any + page: number + perPage: number + totalItems: number + totalPages: number } } @@ -19105,6 +18944,179 @@ namespace slog { import loginternal = internal } +namespace auth { + /** + * Provider defines a common interface for an OAuth2 client. + */ + interface Provider { + [key:string]: any; + /** + * Context returns the context associated with the provider (if any). + */ + context(): context.Context + /** + * SetContext assigns the specified context to the current provider. + */ + setContext(ctx: context.Context): void + /** + * PKCE indicates whether the provider can use the PKCE flow. + */ + pkce(): boolean + /** + * SetPKCE toggles the state whether the provider can use the PKCE flow or not. + */ + setPKCE(enable: boolean): void + /** + * DisplayName usually returns provider name as it is officially written + * and it could be used directly in the UI. + */ + displayName(): string + /** + * SetDisplayName sets the provider's display name. + */ + setDisplayName(displayName: string): void + /** + * Scopes returns the provider access permissions that will be requested. + */ + scopes(): Array + /** + * SetScopes sets the provider access permissions that will be requested later. + */ + setScopes(scopes: Array): void + /** + * ClientId returns the provider client's app ID. + */ + clientId(): string + /** + * SetClientId sets the provider client's ID. + */ + setClientId(clientId: string): void + /** + * ClientSecret returns the provider client's app secret. + */ + clientSecret(): string + /** + * SetClientSecret sets the provider client's app secret. + */ + setClientSecret(secret: string): void + /** + * RedirectURL returns the end address to redirect the user + * going through the OAuth flow. + */ + redirectURL(): string + /** + * SetRedirectURL sets the provider's RedirectURL. + */ + setRedirectURL(url: string): void + /** + * AuthURL returns the provider's authorization service url. 
+ */ + authURL(): string + /** + * SetAuthURL sets the provider's AuthURL. + */ + setAuthURL(url: string): void + /** + * TokenURL returns the provider's token exchange service url. + */ + tokenURL(): string + /** + * SetTokenURL sets the provider's TokenURL. + */ + setTokenURL(url: string): void + /** + * UserInfoURL returns the provider's user info api url. + */ + userInfoURL(): string + /** + * SetUserInfoURL sets the provider's UserInfoURL. + */ + setUserInfoURL(url: string): void + /** + * Client returns an http client using the provided token. + */ + client(token: oauth2.Token): (any) + /** + * BuildAuthURL returns a URL to the provider's consent page + * that asks for permissions for the required scopes explicitly. + */ + buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string + /** + * FetchToken converts an authorization code to token. + */ + fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token) + /** + * FetchRawUserInfo requests and marshalizes into `result` the + * the OAuth user api response. + */ + fetchRawUserInfo(token: oauth2.Token): string|Array + /** + * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and + * marshalizes the user api response into a standardized AuthUser struct. + */ + fetchAuthUser(token: oauth2.Token): (AuthUser) + } + /** + * AuthUser defines a standardized OAuth2 user data structure. + */ + interface AuthUser { + expiry: types.DateTime + rawUser: _TygojaDict + id: string + name: string + username: string + email: string + avatarURL: string + accessToken: string + refreshToken: string + /** + * @todo + * deprecated: use AvatarURL instead + * AvatarUrl will be removed after dropping v0.22 support + */ + avatarUrl: string + } + interface AuthUser { + /** + * MarshalJSON implements the [json.Marshaler] interface. + * + * @todo remove after dropping v0.22 support + */ + marshalJSON(): string|Array + } +} + +namespace hook { + /** + * Event implements [Resolver] and it is intended to be used as a base + * Hook event that you can embed in your custom typed event structs. + * + * Example: + * + * ``` + * type CustomEvent struct { + * hook.Event + * + * SomeField int + * } + * ``` + */ + interface Event { + } + interface Event { + /** + * Next calls the next hook handler. + */ + next(): void + } + /** + * wrapped local Hook embedded struct to limit the public API surface. + */ + type _subrVQTh = Hook + interface mainHook extends _subrVQTh { + } +} + namespace mailer { /** * Message defines a generic email message struct. @@ -19122,19 +19134,6 @@ namespace mailer { } } -namespace search { - /** - * Result defines the returned search result structure. - */ - interface Result { - items: any - page: number - perPage: number - totalItems: number - totalPages: number - } -} - namespace router { // @ts-ignore import validation = ozzo_validation @@ -19267,8 +19266,8 @@ namespace core { /** * @todo experiment eventually replacing the rules *string with a struct? */ - type _subZRoho = BaseModel - interface baseCollection extends _subZRoho { + type _subLUleH = BaseModel + interface baseCollection extends _subLUleH { listRule?: string viewRule?: string createRule?: string @@ -19498,6 +19497,330 @@ namespace net { } } +/** + * Package http provides HTTP client and server implementations. + * + * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: + * + * ``` + * resp, err := http.Get("http://example.com/") + * ... + * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + * ... 
+ * resp, err := http.PostForm("http://example.com/form", + * url.Values{"key": {"Value"}, "id": {"123"}}) + * ``` + * + * The caller must close the response body when finished with it: + * + * ``` + * resp, err := http.Get("http://example.com/") + * if err != nil { + * // handle error + * } + * defer resp.Body.Close() + * body, err := io.ReadAll(resp.Body) + * // ... + * ``` + * + * # Clients and Transports + * + * For control over HTTP client headers, redirect policy, and other + * settings, create a [Client]: + * + * ``` + * client := &http.Client{ + * CheckRedirect: redirectPolicyFunc, + * } + * + * resp, err := client.Get("http://example.com") + * // ... + * + * req, err := http.NewRequest("GET", "http://example.com", nil) + * // ... + * req.Header.Add("If-None-Match", `W/"wyzzy"`) + * resp, err := client.Do(req) + * // ... + * ``` + * + * For control over proxies, TLS configuration, keep-alives, + * compression, and other settings, create a [Transport]: + * + * ``` + * tr := &http.Transport{ + * MaxIdleConns: 10, + * IdleConnTimeout: 30 * time.Second, + * DisableCompression: true, + * } + * client := &http.Client{Transport: tr} + * resp, err := client.Get("https://example.com") + * ``` + * + * Clients and Transports are safe for concurrent use by multiple + * goroutines and for efficiency should only be created once and re-used. + * + * # Servers + * + * ListenAndServe starts an HTTP server with a given address and handler. + * The handler is usually nil, which means to use [DefaultServeMux]. + * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: + * + * ``` + * http.Handle("/foo", fooHandler) + * + * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + * }) + * + * log.Fatal(http.ListenAndServe(":8080", nil)) + * ``` + * + * More control over the server's behavior is available by creating a + * custom Server: + * + * ``` + * s := &http.Server{ + * Addr: ":8080", + * Handler: myHandler, + * ReadTimeout: 10 * time.Second, + * WriteTimeout: 10 * time.Second, + * MaxHeaderBytes: 1 << 20, + * } + * log.Fatal(s.ListenAndServe()) + * ``` + * + * # HTTP/2 + * + * Starting with Go 1.6, the http package has transparent support for the + * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 + * can do so by setting [Transport.TLSNextProto] (for clients) or + * [Server.TLSNextProto] (for servers) to a non-nil, empty + * map. Alternatively, the following GODEBUG settings are + * currently supported: + * + * ``` + * GODEBUG=http2client=0 # disable HTTP/2 client support + * GODEBUG=http2server=0 # disable HTTP/2 server support + * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs + * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps + * ``` + * + * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug + * + * The http package's [Transport] and [Server] both automatically enable + * HTTP/2 support for simple configurations. To enable HTTP/2 for more + * complex configurations, to use lower-level HTTP/2 features, or to use + * a newer version of Go's http2 package, import "golang.org/x/net/http2" + * directly and use its ConfigureTransport and/or ConfigureServer + * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 + * package takes precedence over the net/http package's built-in HTTP/2 + * support. 
+ */ +namespace http { + // @ts-ignore + import mathrand = rand + // @ts-ignore + import urlpkg = url + /** + * A ConnState represents the state of a client connection to a server. + * It's used by the optional [Server.ConnState] hook. + */ + interface ConnState extends Number{} + interface ConnState { + string(): string + } +} + +/** + * Package oauth2 provides support for making + * OAuth2 authorized and authenticated HTTP requests, + * as specified in RFC 6749. + * It can additionally grant authorization with Bearer JWT. + */ +/** + * Copyright 2023 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file. + */ +namespace oauth2 { + /** + * An AuthCodeOption is passed to Config.AuthCodeURL. + */ + interface AuthCodeOption { + [key:string]: any; + } + /** + * Token represents the credentials used to authorize + * the requests to access protected resources on the OAuth 2.0 + * provider's backend. + * + * Most users of this package should not access fields of Token + * directly. They're exported mostly for use by related packages + * implementing derivative OAuth2 flows. + */ + interface Token { + /** + * AccessToken is the token that authorizes and authenticates + * the requests. + */ + accessToken: string + /** + * TokenType is the type of token. + * The Type method returns either this or "Bearer", the default. + */ + tokenType: string + /** + * RefreshToken is a token that's used by the application + * (as opposed to the user) to refresh the access token + * if it expires. + */ + refreshToken: string + /** + * Expiry is the optional expiration time of the access token. + * + * If zero, TokenSource implementations will reuse the same + * token forever and RefreshToken or equivalent + * mechanisms for that TokenSource will not be used. + */ + expiry: time.Time + /** + * ExpiresIn is the OAuth2 wire format "expires_in" field, + * which specifies how many seconds later the token expires, + * relative to an unknown time base approximately around "now". + * It is the application's responsibility to populate + * `Expiry` from `ExpiresIn` when required. + */ + expiresIn: number + } + interface Token { + /** + * Type returns t.TokenType if non-empty, else "Bearer". + */ + type(): string + } + interface Token { + /** + * SetAuthHeader sets the Authorization header to r using the access + * token in t. + * + * This method is unnecessary when using Transport or an HTTP Client + * returned by this package. + */ + setAuthHeader(r: http.Request): void + } + interface Token { + /** + * WithExtra returns a new Token that's a clone of t, but using the + * provided raw extra map. This is only intended for use by packages + * implementing derivative OAuth2 flows. + */ + withExtra(extra: { + }): (Token) + } + interface Token { + /** + * Extra returns an extra field. + * Extra fields are key-value pairs returned by the server as a + * part of the token retrieval response. + */ + extra(key: string): { + } + } + interface Token { + /** + * Valid reports whether t is non-nil, has an AccessToken, and is not expired. + */ + valid(): boolean + } +} + +/** + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. 
+ * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. + */ +namespace sql { +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { + /** + * JSONArray defines a slice that is safe for json and db read/write. + */ + interface JSONArray extends Array{} + interface JSONArray { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface JSONArray { + /** + * String returns the string representation of the current json array. + */ + string(): string + } + interface JSONArray { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface JSONArray { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current JSONArray[T] instance. + */ + scan(value: any): void + } + /** + * JSONRaw defines a json value type that is safe for db read/write. + */ + interface JSONRaw extends Array{} + interface JSONRaw { + /** + * String returns the current JSONRaw instance as a json encoded string. + */ + string(): string + } + interface JSONRaw { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface JSONRaw { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface JSONRaw { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface JSONRaw { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current JSONRaw instance. + */ + scan(value: any): void + } +} + namespace hook { } @@ -20046,230 +20369,9 @@ namespace slog { } } -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { -} - -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { - /** - * JSONArray defines a slice that is safe for json and db read/write. - */ - interface JSONArray extends Array{} - interface JSONArray { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface JSONArray { - /** - * String returns the string representation of the current json array. - */ - string(): string - } - interface JSONArray { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface JSONArray { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current JSONArray[T] instance. - */ - scan(value: any): void - } - /** - * JSONRaw defines a json value type that is safe for db read/write. - */ - interface JSONRaw extends Array{} - interface JSONRaw { - /** - * String returns the current JSONRaw instance as a json encoded string. - */ - string(): string - } - interface JSONRaw { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface JSONRaw { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. 
- */ - unmarshalJSON(b: string|Array): void - } - interface JSONRaw { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface JSONRaw { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current JSONRaw instance. - */ - scan(value: any): void - } -} - namespace search { } -/** - * Package http provides HTTP client and server implementations. - * - * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: - * - * ``` - * resp, err := http.Get("http://example.com/") - * ... - * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) - * ... - * resp, err := http.PostForm("http://example.com/form", - * url.Values{"key": {"Value"}, "id": {"123"}}) - * ``` - * - * The caller must close the response body when finished with it: - * - * ``` - * resp, err := http.Get("http://example.com/") - * if err != nil { - * // handle error - * } - * defer resp.Body.Close() - * body, err := io.ReadAll(resp.Body) - * // ... - * ``` - * - * # Clients and Transports - * - * For control over HTTP client headers, redirect policy, and other - * settings, create a [Client]: - * - * ``` - * client := &http.Client{ - * CheckRedirect: redirectPolicyFunc, - * } - * - * resp, err := client.Get("http://example.com") - * // ... - * - * req, err := http.NewRequest("GET", "http://example.com", nil) - * // ... - * req.Header.Add("If-None-Match", `W/"wyzzy"`) - * resp, err := client.Do(req) - * // ... - * ``` - * - * For control over proxies, TLS configuration, keep-alives, - * compression, and other settings, create a [Transport]: - * - * ``` - * tr := &http.Transport{ - * MaxIdleConns: 10, - * IdleConnTimeout: 30 * time.Second, - * DisableCompression: true, - * } - * client := &http.Client{Transport: tr} - * resp, err := client.Get("https://example.com") - * ``` - * - * Clients and Transports are safe for concurrent use by multiple - * goroutines and for efficiency should only be created once and re-used. - * - * # Servers - * - * ListenAndServe starts an HTTP server with a given address and handler. - * The handler is usually nil, which means to use [DefaultServeMux]. - * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: - * - * ``` - * http.Handle("/foo", fooHandler) - * - * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { - * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) - * }) - * - * log.Fatal(http.ListenAndServe(":8080", nil)) - * ``` - * - * More control over the server's behavior is available by creating a - * custom Server: - * - * ``` - * s := &http.Server{ - * Addr: ":8080", - * Handler: myHandler, - * ReadTimeout: 10 * time.Second, - * WriteTimeout: 10 * time.Second, - * MaxHeaderBytes: 1 << 20, - * } - * log.Fatal(s.ListenAndServe()) - * ``` - * - * # HTTP/2 - * - * Starting with Go 1.6, the http package has transparent support for the - * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 - * can do so by setting [Transport.TLSNextProto] (for clients) or - * [Server.TLSNextProto] (for servers) to a non-nil, empty - * map. Alternatively, the following GODEBUG settings are - * currently supported: - * - * ``` - * GODEBUG=http2client=0 # disable HTTP/2 client support - * GODEBUG=http2server=0 # disable HTTP/2 server support - * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs - * GODEBUG=http2debug=2 # ... 
even more verbose, with frame dumps - * ``` - * - * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - * - * The http package's [Transport] and [Server] both automatically enable - * HTTP/2 support for simple configurations. To enable HTTP/2 for more - * complex configurations, to use lower-level HTTP/2 features, or to use - * a newer version of Go's http2 package, import "golang.org/x/net/http2" - * directly and use its ConfigureTransport and/or ConfigureServer - * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 - * package takes precedence over the net/http package's built-in HTTP/2 - * support. - */ -namespace http { - // @ts-ignore - import mathrand = rand - // @ts-ignore - import urlpkg = url - /** - * A ConnState represents the state of a client connection to a server. - * It's used by the optional [Server.ConnState] hook. - */ - interface ConnState extends Number{} - interface ConnState { - string(): string - } -} - namespace router { // @ts-ignore import validation = ozzo_validation @@ -20278,109 +20380,6 @@ namespace router { namespace subscriptions { } -/** - * Package oauth2 provides support for making - * OAuth2 authorized and authenticated HTTP requests, - * as specified in RFC 6749. - * It can additionally grant authorization with Bearer JWT. - */ -/** - * Copyright 2023 The Go Authors. All rights reserved. - * Use of this source code is governed by a BSD-style - * license that can be found in the LICENSE file. - */ -namespace oauth2 { - /** - * An AuthCodeOption is passed to Config.AuthCodeURL. - */ - interface AuthCodeOption { - [key:string]: any; - } - /** - * Token represents the credentials used to authorize - * the requests to access protected resources on the OAuth 2.0 - * provider's backend. - * - * Most users of this package should not access fields of Token - * directly. They're exported mostly for use by related packages - * implementing derivative OAuth2 flows. - */ - interface Token { - /** - * AccessToken is the token that authorizes and authenticates - * the requests. - */ - accessToken: string - /** - * TokenType is the type of token. - * The Type method returns either this or "Bearer", the default. - */ - tokenType: string - /** - * RefreshToken is a token that's used by the application - * (as opposed to the user) to refresh the access token - * if it expires. - */ - refreshToken: string - /** - * Expiry is the optional expiration time of the access token. - * - * If zero, TokenSource implementations will reuse the same - * token forever and RefreshToken or equivalent - * mechanisms for that TokenSource will not be used. - */ - expiry: time.Time - /** - * ExpiresIn is the OAuth2 wire format "expires_in" field, - * which specifies how many seconds later the token expires, - * relative to an unknown time base approximately around "now". - * It is the application's responsibility to populate - * `Expiry` from `ExpiresIn` when required. - */ - expiresIn: number - } - interface Token { - /** - * Type returns t.TokenType if non-empty, else "Bearer". - */ - type(): string - } - interface Token { - /** - * SetAuthHeader sets the Authorization header to r using the access - * token in t. - * - * This method is unnecessary when using Transport or an HTTP Client - * returned by this package. - */ - setAuthHeader(r: http.Request): void - } - interface Token { - /** - * WithExtra returns a new Token that's a clone of t, but using the - * provided raw extra map. 
This is only intended for use by packages - * implementing derivative OAuth2 flows. - */ - withExtra(extra: { - }): (Token) - } - interface Token { - /** - * Extra returns an extra field. - * Extra fields are key-value pairs returned by the server as a - * part of the token retrieval response. - */ - extra(key: string): { - } - } - interface Token { - /** - * Valid reports whether t is non-nil, has an AccessToken, and is not expired. - */ - valid(): boolean - } -} - /** * Package core is the backbone of PocketBase. *