diff --git a/plugins/jsvm/internal/types/generated/types.d.ts b/plugins/jsvm/internal/types/generated/types.d.ts index 26012300..97e371fe 100644 --- a/plugins/jsvm/internal/types/generated/types.d.ts +++ b/plugins/jsvm/internal/types/generated/types.d.ts @@ -1,4 +1,4 @@ -// 1744111820 +// 1744743467 // GENERATED CODE - DO NOT MODIFY BY HAND // ------------------------------------------------------------------- @@ -1760,8 +1760,8 @@ namespace os { * than ReadFrom. This is used to permit ReadFrom to call io.Copy * without leading to a recursive call to ReadFrom. */ - type _skKxnOp = noReadFrom&File - interface fileWithoutReadFrom extends _skKxnOp { + type _skznVQb = noReadFrom&File + interface fileWithoutReadFrom extends _skznVQb { } interface File { /** @@ -1805,8 +1805,8 @@ namespace os { * than WriteTo. This is used to permit WriteTo to call io.Copy * without leading to a recursive call to WriteTo. */ - type _sXPhxiu = noWriteTo&File - interface fileWithoutWriteTo extends _sXPhxiu { + type _sQylRmX = noWriteTo&File + interface fileWithoutWriteTo extends _sQylRmX { } interface File { /** @@ -2450,8 +2450,8 @@ namespace os { * * The methods of File are safe for concurrent use. */ - type _shLHxQY = file - interface File extends _shLHxQY { + type _sUqDciF = file + interface File extends _sUqDciF { } /** * A FileInfo describes a file and is returned by [Stat] and [Lstat]. @@ -2861,6 +2861,503 @@ namespace ozzo_validation { } } +namespace security { + interface s256Challenge { + /** + * S256Challenge creates base64 encoded sha256 challenge string derived from code. + * The padding of the result base64 string is stripped per [RFC 7636]. + * + * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2 + */ + (code: string): string + } + interface md5 { + /** + * MD5 creates md5 hash from the provided plain text. + */ + (text: string): string + } + interface sha256 { + /** + * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text. + */ + (text: string): string + } + interface sha512 { + /** + * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text. + */ + (text: string): string + } + interface hs256 { + /** + * HS256 creates a HMAC hash with sha256 digest algorithm. + */ + (text: string, secret: string): string + } + interface hs512 { + /** + * HS512 creates a HMAC hash with sha512 digest algorithm. + */ + (text: string, secret: string): string + } + interface equal { + /** + * Equal compares two hash strings for equality without leaking timing information. + */ + (hash1: string, hash2: string): boolean + } + // @ts-ignore + import crand = rand + interface encrypt { + /** + * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key). + * + * This method uses AES-256-GCM block cypher mode. + */ + (data: string|Array, key: string): string + } + interface decrypt { + /** + * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key). + * + * This method uses AES-256-GCM block cypher mode. + */ + (cipherText: string, key: string): string|Array + } + interface parseUnverifiedJWT { + /** + * ParseUnverifiedJWT parses JWT and returns its claims + * but DOES NOT verify the signature. + * + * It verifies only the exp, iat and nbf claims. + */ + (token: string): jwt.MapClaims + } + interface parseJWT { + /** + * ParseJWT verifies and parses JWT and returns its claims. + */ + (token: string, verificationKey: string): jwt.MapClaims + } + interface newJWT { + /** + * NewJWT generates and returns new HS256 signed JWT. 
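+ * 
+ * Illustrative JS sketch (non-authoritative): the `$security.createJWT` binding
+ * name and the seconds-based duration argument are assumptions about the JSVM
+ * wrapper, not something these declarations guarantee.
+ * 
+ * ```
+ * // issue a short-lived HS256 token with a custom payload (createJWT binding name assumed)
+ * const token = $security.createJWT({ userId: "abc123" }, "SIGNING_SECRET", 60 * 60)
+ * ```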
+ */ + (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string + } + // @ts-ignore + import cryptoRand = rand + // @ts-ignore + import mathRand = rand + interface randomString { + /** + * RandomString generates a cryptographically random string with the specified length. + * + * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. + */ + (length: number): string + } + interface randomStringWithAlphabet { + /** + * RandomStringWithAlphabet generates a cryptographically random string + * with the specified length and characters set. + * + * It panics if for some reason rand.Int returns a non-nil error. + */ + (length: number, alphabet: string): string + } + interface pseudorandomString { + /** + * PseudorandomString generates a pseudorandom string with the specified length. + * + * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. + * + * For a cryptographically random string (but a little bit slower) use RandomString instead. + */ + (length: number): string + } + interface pseudorandomStringWithAlphabet { + /** + * PseudorandomStringWithAlphabet generates a pseudorandom string + * with the specified length and characters set. + * + * For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead. + */ + (length: number, alphabet: string): string + } + interface randomStringByRegex { + /** + * RandomStringByRegex generates a random string matching the regex pattern. + * If optFlags is not set, fallbacks to [syntax.Perl]. + * + * NB! While the source of the randomness comes from [crypto/rand] this method + * is not recommended to be used on its own in critical secure contexts because + * the generated length could vary too much on the used pattern and may not be + * as secure as simply calling [security.RandomString]. + * If you still insist on using it for such purposes, consider at least + * a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`. + * + * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages. + */ + (pattern: string, ...optFlags: syntax.Flags[]): string + } +} + +namespace filesystem { + /** + * FileReader defines an interface for a file resource reader. + */ + interface FileReader { + [key:string]: any; + open(): io.ReadSeekCloser + } + /** + * File defines a single file [io.ReadSeekCloser] resource. + * + * The file could be from a local path, multipart/form-data header, etc. + */ + interface File { + reader: FileReader + name: string + originalName: string + size: number + } + interface File { + /** + * AsMap implements [core.mapExtractor] and returns a value suitable + * to be used in an API rule expression. + */ + asMap(): _TygojaDict + } + interface newFileFromPath { + /** + * NewFileFromPath creates a new File instance from the provided local file path. + */ + (path: string): (File) + } + interface newFileFromBytes { + /** + * NewFileFromBytes creates a new File instance from the provided byte slice. + */ + (b: string|Array, name: string): (File) + } + interface newFileFromMultipart { + /** + * NewFileFromMultipart creates a new File from the provided multipart header. + */ + (mh: multipart.FileHeader): (File) + } + interface newFileFromURL { + /** + * NewFileFromURL creates a new File from the provided url by + * downloading the resource and load it as BytesReader. 
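+ * 
+ * From JS hooks the same helper is usually reachable through a `$filesystem`
+ * binding; a rough sketch only — the `fileFromURL` name and the implicitly
+ * provided context are assumptions:
+ * 
+ * ```
+ * // download the resource into an in-memory File (binding name assumed)
+ * const file = $filesystem.fileFromURL("https://example.com/image.png")
+ * ```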
+ * + * Example + * + * ``` + * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + * defer cancel() + * + * file, err := filesystem.NewFileFromURL(ctx, "https://example.com/image.png") + * ``` + */ + (ctx: context.Context, url: string): (File) + } + /** + * MultipartReader defines a FileReader from [multipart.FileHeader]. + */ + interface MultipartReader { + header?: multipart.FileHeader + } + interface MultipartReader { + /** + * Open implements the [filesystem.FileReader] interface. + */ + open(): io.ReadSeekCloser + } + /** + * PathReader defines a FileReader from a local file path. + */ + interface PathReader { + path: string + } + interface PathReader { + /** + * Open implements the [filesystem.FileReader] interface. + */ + open(): io.ReadSeekCloser + } + /** + * BytesReader defines a FileReader from bytes content. + */ + interface BytesReader { + bytes: string|Array + } + interface BytesReader { + /** + * Open implements the [filesystem.FileReader] interface. + */ + open(): io.ReadSeekCloser + } + type _soNgctT = bytes.Reader + interface bytesReadSeekCloser extends _soNgctT { + } + interface bytesReadSeekCloser { + /** + * Close implements the [io.ReadSeekCloser] interface. + */ + close(): void + } + interface System { + } + interface newS3 { + /** + * NewS3 initializes an S3 filesystem instance. + * + * NB! Make sure to call `Close()` after you are done working with it. + */ + (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System) + } + interface newLocal { + /** + * NewLocal initializes a new local filesystem instance. + * + * NB! Make sure to call `Close()` after you are done working with it. + */ + (dirPath: string): (System) + } + interface System { + /** + * SetContext assigns the specified context to the current filesystem. + */ + setContext(ctx: context.Context): void + } + interface System { + /** + * Close releases any resources used for the related filesystem. + */ + close(): void + } + interface System { + /** + * Exists checks if file with fileKey path exists or not. + */ + exists(fileKey: string): boolean + } + interface System { + /** + * Attributes returns the attributes for the file with fileKey path. + * + * If the file doesn't exist it returns ErrNotFound. + */ + attributes(fileKey: string): (blob.Attributes) + } + interface System { + /** + * GetFile returns a file content reader for the given fileKey. + * + * NB! Make sure to call Close() on the file after you are done working with it. + * + * If the file doesn't exist returns ErrNotFound. + */ + getFile(fileKey: string): (blob.Reader) + } + interface System { + /** + * Copy copies the file stored at srcKey to dstKey. + * + * If srcKey file doesn't exist, it returns ErrNotFound. + * + * If dstKey file already exists, it is overwritten. + */ + copy(srcKey: string, dstKey: string): void + } + interface System { + /** + * List returns a flat list with info for all files under the specified prefix. + */ + list(prefix: string): Array<(blob.ListObject | undefined)> + } + interface System { + /** + * Upload writes content into the fileKey location. + */ + upload(content: string|Array, fileKey: string): void + } + interface System { + /** + * UploadFile uploads the provided File to the fileKey location. + */ + uploadFile(file: File, fileKey: string): void + } + interface System { + /** + * UploadMultipart uploads the provided multipart file to the fileKey location. 
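+ * 
+ * The other upload helpers follow the same pattern; a rough JS sketch
+ * (the `$app` and `$filesystem.fileFromPath` bindings are assumptions,
+ * while uploadFile/close come from the System declarations in this file):
+ * 
+ * ```
+ * let fsys = $app.newFilesystem() // assumed accessor for a configured System
+ * try {
+ *     const file = $filesystem.fileFromPath("/path/to/report.pdf") // assumed binding
+ *     fsys.uploadFile(file, "reports/report.pdf")
+ * } finally {
+ *     fsys.close()
+ * }
+ * ```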
+ */ + uploadMultipart(fh: multipart.FileHeader, fileKey: string): void + } + interface System { + /** + * Delete deletes stored file at fileKey location. + * + * If the file doesn't exist returns ErrNotFound. + */ + delete(fileKey: string): void + } + interface System { + /** + * DeletePrefix deletes everything starting with the specified prefix. + * + * The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_"). + */ + deletePrefix(prefix: string): Array + } + interface System { + /** + * Checks if the provided dir prefix doesn't have any files. + * + * A trailing slash will be appended to a non-empty dir string argument + * to ensure that the checked prefix is a "directory". + * + * Returns "false" in case the has at least one file, otherwise - "true". + */ + isEmptyDir(dir: string): boolean + } + interface System { + /** + * Serve serves the file at fileKey location to an HTTP response. + * + * If the `download` query parameter is used the file will be always served for + * download no matter of its type (aka. with "Content-Disposition: attachment"). + * + * Internally this method uses [http.ServeContent] so Range requests, + * If-Match, If-Unmodified-Since, etc. headers are handled transparently. + */ + serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void + } + interface System { + /** + * CreateThumb creates a new thumb image for the file at originalKey location. + * The new thumb file is stored at thumbKey location. + * + * thumbSize is in the format: + * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio + * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio + * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center) + * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top) + * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom) + * - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping) + */ + createThumb(originalKey: string, thumbKey: string, thumbSize: string): void + } +} + +/** + * Package template is a thin wrapper around the standard html/template + * and text/template packages that implements a convenient registry to + * load and cache templates on the fly concurrently. + * + * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. + * + * Example: + * + * ``` + * registry := template.NewRegistry() + * + * html1, err := registry.LoadFiles( + * // the files set wil be parsed only once and then cached + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "John"}) + * + * html2, err := registry.LoadFiles( + * // reuse the already parsed and cached files set + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "Jane"}) + * ``` + */ +namespace template { + interface newRegistry { + /** + * NewRegistry creates and initializes a new templates registry with + * some defaults (eg. global "raw" template function for unescaped HTML). + * + * Use the Registry.Load* methods to load templates into the registry. + */ + (): (Registry) + } + /** + * Registry defines a templates registry that is safe to be used by multiple goroutines. + * + * Use the Registry.Load* methods to load templates into the registry. + */ + interface Registry { + } + interface Registry { + /** + * AddFuncs registers new global template functions. + * + * The key of each map entry is the function name that will be used in the templates. 
+ * If a function with the map entry name already exists it will be replaced with the new one. + * + * The value of each map entry is a function that must have either a + * single return value, or two return values of which the second has type error. + * + * Example: + * + * ``` + * r.AddFuncs(map[string]any{ + * "toUpper": func(str string) string { + * return strings.ToUppser(str) + * }, + * ... + * }) + * ``` + */ + addFuncs(funcs: _TygojaDict): (Registry) + } + interface Registry { + /** + * LoadFiles caches (if not already) the specified filenames set as a + * single template and returns a ready to use Renderer instance. + * + * There must be at least 1 filename specified. + */ + loadFiles(...filenames: string[]): (Renderer) + } + interface Registry { + /** + * LoadString caches (if not already) the specified inline string as a + * single template and returns a ready to use Renderer instance. + */ + loadString(text: string): (Renderer) + } + interface Registry { + /** + * LoadFS caches (if not already) the specified fs and globPatterns + * pair as single template and returns a ready to use Renderer instance. + * + * There must be at least 1 file matching the provided globPattern(s) + * (note that most file names serves as glob patterns matching themselves). + */ + loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) + } + /** + * Renderer defines a single parsed template. + */ + interface Renderer { + } + interface Renderer { + /** + * Render executes the template with the specified data as the dot object + * and returns the result as plain string. + */ + render(data: any): string + } +} + /** * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases. */ @@ -3197,14 +3694,14 @@ namespace dbx { /** * MssqlBuilder is the builder for SQL Server databases. */ - type _sVdLHrE = BaseBuilder - interface MssqlBuilder extends _sVdLHrE { + type _snoMQCE = BaseBuilder + interface MssqlBuilder extends _snoMQCE { } /** * MssqlQueryBuilder is the query builder for SQL Server databases. */ - type _sBltlJG = BaseQueryBuilder - interface MssqlQueryBuilder extends _sBltlJG { + type _siEnTuc = BaseQueryBuilder + interface MssqlQueryBuilder extends _siEnTuc { } interface newMssqlBuilder { /** @@ -3275,8 +3772,8 @@ namespace dbx { /** * MysqlBuilder is the builder for MySQL databases. */ - type _stRokrm = BaseBuilder - interface MysqlBuilder extends _stRokrm { + type _sYTbhxE = BaseBuilder + interface MysqlBuilder extends _sYTbhxE { } interface newMysqlBuilder { /** @@ -3351,14 +3848,14 @@ namespace dbx { /** * OciBuilder is the builder for Oracle databases. */ - type _sXJRDKu = BaseBuilder - interface OciBuilder extends _sXJRDKu { + type _sQnZwtP = BaseBuilder + interface OciBuilder extends _sQnZwtP { } /** * OciQueryBuilder is the query builder for Oracle databases. */ - type _sjRRAmZ = BaseQueryBuilder - interface OciQueryBuilder extends _sjRRAmZ { + type _sMCzimX = BaseQueryBuilder + interface OciQueryBuilder extends _sMCzimX { } interface newOciBuilder { /** @@ -3421,8 +3918,8 @@ namespace dbx { /** * PgsqlBuilder is the builder for PostgreSQL databases. */ - type _sknQkyn = BaseBuilder - interface PgsqlBuilder extends _sknQkyn { + type _shkaVwR = BaseBuilder + interface PgsqlBuilder extends _shkaVwR { } interface newPgsqlBuilder { /** @@ -3489,8 +3986,8 @@ namespace dbx { /** * SqliteBuilder is the builder for SQLite databases. 
*/ - type _sjBXVAc = BaseBuilder - interface SqliteBuilder extends _sjBXVAc { + type _sSblqQg = BaseBuilder + interface SqliteBuilder extends _sSblqQg { } interface newSqliteBuilder { /** @@ -3589,8 +4086,8 @@ namespace dbx { /** * StandardBuilder is the builder that is used by DB for an unknown driver. */ - type _spknCdq = BaseBuilder - interface StandardBuilder extends _spknCdq { + type _sAddwXw = BaseBuilder + interface StandardBuilder extends _sAddwXw { } interface newStandardBuilder { /** @@ -3656,8 +4153,8 @@ namespace dbx { * DB enhances sql.DB by providing a set of DB-agnostic query building methods. * DB allows easier query building and population of data into Go variables. */ - type _szwuUXD = Builder - interface DB extends _szwuUXD { + type _sKOlKld = Builder + interface DB extends _sKOlKld { /** * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc. */ @@ -4461,8 +4958,8 @@ namespace dbx { * Rows enhances sql.Rows by providing additional data query methods. * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row. */ - type _srwNrtH = sql.Rows - interface Rows extends _srwNrtH { + type _szaXPId = sql.Rows + interface Rows extends _szaXPId { } interface Rows { /** @@ -4834,8 +5331,8 @@ namespace dbx { }): string } interface structInfo { } - type _sNxeqUe = structInfo - interface structValue extends _sNxeqUe { + type _spOEQHb = structInfo + interface structValue extends _spOEQHb { } interface fieldInfo { } @@ -4874,8 +5371,8 @@ namespace dbx { /** * Tx enhances sql.Tx with additional querying methods. */ - type _sqJKVPV = Builder - interface Tx extends _sqJKVPV { + type _sxGpiRA = Builder + interface Tx extends _sxGpiRA { } interface Tx { /** @@ -4891,398 +5388,6 @@ namespace dbx { } } -namespace security { - interface s256Challenge { - /** - * S256Challenge creates base64 encoded sha256 challenge string derived from code. - * The padding of the result base64 string is stripped per [RFC 7636]. - * - * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2 - */ - (code: string): string - } - interface md5 { - /** - * MD5 creates md5 hash from the provided plain text. - */ - (text: string): string - } - interface sha256 { - /** - * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text. - */ - (text: string): string - } - interface sha512 { - /** - * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text. - */ - (text: string): string - } - interface hs256 { - /** - * HS256 creates a HMAC hash with sha256 digest algorithm. - */ - (text: string, secret: string): string - } - interface hs512 { - /** - * HS512 creates a HMAC hash with sha512 digest algorithm. - */ - (text: string, secret: string): string - } - interface equal { - /** - * Equal compares two hash strings for equality without leaking timing information. - */ - (hash1: string, hash2: string): boolean - } - // @ts-ignore - import crand = rand - interface encrypt { - /** - * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key). - * - * This method uses AES-256-GCM block cypher mode. - */ - (data: string|Array, key: string): string - } - interface decrypt { - /** - * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key). - * - * This method uses AES-256-GCM block cypher mode. 
- */ - (cipherText: string, key: string): string|Array - } - interface parseUnverifiedJWT { - /** - * ParseUnverifiedJWT parses JWT and returns its claims - * but DOES NOT verify the signature. - * - * It verifies only the exp, iat and nbf claims. - */ - (token: string): jwt.MapClaims - } - interface parseJWT { - /** - * ParseJWT verifies and parses JWT and returns its claims. - */ - (token: string, verificationKey: string): jwt.MapClaims - } - interface newJWT { - /** - * NewJWT generates and returns new HS256 signed JWT. - */ - (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string - } - // @ts-ignore - import cryptoRand = rand - // @ts-ignore - import mathRand = rand - interface randomString { - /** - * RandomString generates a cryptographically random string with the specified length. - * - * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. - */ - (length: number): string - } - interface randomStringWithAlphabet { - /** - * RandomStringWithAlphabet generates a cryptographically random string - * with the specified length and characters set. - * - * It panics if for some reason rand.Int returns a non-nil error. - */ - (length: number, alphabet: string): string - } - interface pseudorandomString { - /** - * PseudorandomString generates a pseudorandom string with the specified length. - * - * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. - * - * For a cryptographically random string (but a little bit slower) use RandomString instead. - */ - (length: number): string - } - interface pseudorandomStringWithAlphabet { - /** - * PseudorandomStringWithAlphabet generates a pseudorandom string - * with the specified length and characters set. - * - * For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead. - */ - (length: number, alphabet: string): string - } - interface randomStringByRegex { - /** - * RandomStringByRegex generates a random string matching the regex pattern. - * If optFlags is not set, fallbacks to [syntax.Perl]. - * - * NB! While the source of the randomness comes from [crypto/rand] this method - * is not recommended to be used on its own in critical secure contexts because - * the generated length could vary too much on the used pattern and may not be - * as secure as simply calling [security.RandomString]. - * If you still insist on using it for such purposes, consider at least - * a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`. - * - * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages. - */ - (pattern: string, ...optFlags: syntax.Flags[]): string - } -} - -namespace filesystem { - /** - * FileReader defines an interface for a file resource reader. - */ - interface FileReader { - [key:string]: any; - open(): io.ReadSeekCloser - } - /** - * File defines a single file [io.ReadSeekCloser] resource. - * - * The file could be from a local path, multipart/form-data header, etc. - */ - interface File { - reader: FileReader - name: string - originalName: string - size: number - } - interface File { - /** - * AsMap implements [core.mapExtractor] and returns a value suitable - * to be used in an API rule expression. - */ - asMap(): _TygojaDict - } - interface newFileFromPath { - /** - * NewFileFromPath creates a new File instance from the provided local file path. 
- */ - (path: string): (File) - } - interface newFileFromBytes { - /** - * NewFileFromBytes creates a new File instance from the provided byte slice. - */ - (b: string|Array, name: string): (File) - } - interface newFileFromMultipart { - /** - * NewFileFromMultipart creates a new File from the provided multipart header. - */ - (mh: multipart.FileHeader): (File) - } - interface newFileFromURL { - /** - * NewFileFromURL creates a new File from the provided url by - * downloading the resource and load it as BytesReader. - * - * Example - * - * ``` - * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - * defer cancel() - * - * file, err := filesystem.NewFileFromURL(ctx, "https://example.com/image.png") - * ``` - */ - (ctx: context.Context, url: string): (File) - } - /** - * MultipartReader defines a FileReader from [multipart.FileHeader]. - */ - interface MultipartReader { - header?: multipart.FileHeader - } - interface MultipartReader { - /** - * Open implements the [filesystem.FileReader] interface. - */ - open(): io.ReadSeekCloser - } - /** - * PathReader defines a FileReader from a local file path. - */ - interface PathReader { - path: string - } - interface PathReader { - /** - * Open implements the [filesystem.FileReader] interface. - */ - open(): io.ReadSeekCloser - } - /** - * BytesReader defines a FileReader from bytes content. - */ - interface BytesReader { - bytes: string|Array - } - interface BytesReader { - /** - * Open implements the [filesystem.FileReader] interface. - */ - open(): io.ReadSeekCloser - } - type _sKgVTwv = bytes.Reader - interface bytesReadSeekCloser extends _sKgVTwv { - } - interface bytesReadSeekCloser { - /** - * Close implements the [io.ReadSeekCloser] interface. - */ - close(): void - } - interface System { - } - interface newS3 { - /** - * NewS3 initializes an S3 filesystem instance. - * - * NB! Make sure to call `Close()` after you are done working with it. - */ - (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System) - } - interface newLocal { - /** - * NewLocal initializes a new local filesystem instance. - * - * NB! Make sure to call `Close()` after you are done working with it. - */ - (dirPath: string): (System) - } - interface System { - /** - * SetContext assigns the specified context to the current filesystem. - */ - setContext(ctx: context.Context): void - } - interface System { - /** - * Close releases any resources used for the related filesystem. - */ - close(): void - } - interface System { - /** - * Exists checks if file with fileKey path exists or not. - */ - exists(fileKey: string): boolean - } - interface System { - /** - * Attributes returns the attributes for the file with fileKey path. - * - * If the file doesn't exist it returns ErrNotFound. - */ - attributes(fileKey: string): (blob.Attributes) - } - interface System { - /** - * GetFile returns a file content reader for the given fileKey. - * - * NB! Make sure to call Close() on the file after you are done working with it. - * - * If the file doesn't exist returns ErrNotFound. - */ - getFile(fileKey: string): (blob.Reader) - } - interface System { - /** - * Copy copies the file stored at srcKey to dstKey. - * - * If srcKey file doesn't exist, it returns ErrNotFound. - * - * If dstKey file already exists, it is overwritten. - */ - copy(srcKey: string, dstKey: string): void - } - interface System { - /** - * List returns a flat list with info for all files under the specified prefix. 
- */ - list(prefix: string): Array<(blob.ListObject | undefined)> - } - interface System { - /** - * Upload writes content into the fileKey location. - */ - upload(content: string|Array, fileKey: string): void - } - interface System { - /** - * UploadFile uploads the provided File to the fileKey location. - */ - uploadFile(file: File, fileKey: string): void - } - interface System { - /** - * UploadMultipart uploads the provided multipart file to the fileKey location. - */ - uploadMultipart(fh: multipart.FileHeader, fileKey: string): void - } - interface System { - /** - * Delete deletes stored file at fileKey location. - * - * If the file doesn't exist returns ErrNotFound. - */ - delete(fileKey: string): void - } - interface System { - /** - * DeletePrefix deletes everything starting with the specified prefix. - * - * The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_"). - */ - deletePrefix(prefix: string): Array - } - interface System { - /** - * Checks if the provided dir prefix doesn't have any files. - * - * A trailing slash will be appended to a non-empty dir string argument - * to ensure that the checked prefix is a "directory". - * - * Returns "false" in case the has at least one file, otherwise - "true". - */ - isEmptyDir(dir: string): boolean - } - interface System { - /** - * Serve serves the file at fileKey location to an HTTP response. - * - * If the `download` query parameter is used the file will be always served for - * download no matter of its type (aka. with "Content-Disposition: attachment"). - * - * Internally this method uses [http.ServeContent] so Range requests, - * If-Match, If-Unmodified-Since, etc. headers are handled transparently. - */ - serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void - } - interface System { - /** - * CreateThumb creates a new thumb image for the file at originalKey location. - * The new thumb file is stored at thumbKey location. - * - * thumbSize is in the format: - * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio - * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio - * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center) - * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top) - * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom) - * - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping) - */ - createThumb(originalKey: string, thumbKey: string, thumbSize: string): void - } -} - /** * Package exec runs external commands. It wraps os.StartProcess to make it * easier to remap stdin and stdout, connect I/O with pipes, and do other @@ -5508,7 +5613,7 @@ namespace core { */ newFilesystem(): (filesystem.System) /** - * NewFilesystem creates a new local or S3 filesystem instance + * NewBackupsFilesystem creates a new local or S3 filesystem instance * for managing app backups based on the current app settings. * * NB! Make sure to call Close() on the returned result @@ -7021,8 +7126,8 @@ namespace core { /** * AuthOrigin defines a Record proxy for working with the authOrigins collection. 
*/ - type _subhSuN = Record - interface AuthOrigin extends _subhSuN { + type _sDNJfdG = Record + interface AuthOrigin extends _sDNJfdG { } interface newAuthOrigin { /** @@ -7344,7 +7449,7 @@ namespace core { } interface BaseApp { /** - * NewFilesystem creates a new local or S3 filesystem instance + * NewBackupsFilesystem creates a new local or S3 filesystem instance * for managing app backups based on the current app settings. * * NB! Make sure to call Close() on the returned result @@ -7716,8 +7821,8 @@ namespace core { /** * @todo experiment eventually replacing the rules *string with a struct? */ - type _sWkIqSg = BaseModel - interface baseCollection extends _sWkIqSg { + type _sCHkJml = BaseModel + interface baseCollection extends _sCHkJml { listRule?: string viewRule?: string createRule?: string @@ -7744,8 +7849,8 @@ namespace core { /** * Collection defines the table, fields and various options related to a set of records. */ - type _sCSSmeH = baseCollection&collectionAuthOptions&collectionViewOptions - interface Collection extends _sCSSmeH { + type _smdVvPV = baseCollection&collectionAuthOptions&collectionViewOptions + interface Collection extends _smdVvPV { } interface newCollection { /** @@ -8574,8 +8679,8 @@ namespace core { /** * RequestEvent defines the PocketBase router handler event. */ - type _sUHrXYv = router.Event - interface RequestEvent extends _sUHrXYv { + type _stvLnQL = router.Event + interface RequestEvent extends _stvLnQL { app: App auth?: Record } @@ -8635,8 +8740,8 @@ namespace core { */ clone(): (RequestInfo) } - type _siRbLRv = hook.Event&RequestEvent - interface BatchRequestEvent extends _siRbLRv { + type _sIawfNN = hook.Event&RequestEvent + interface BatchRequestEvent extends _sIawfNN { batch: Array<(InternalRequest | undefined)> } interface InternalRequest { @@ -8673,24 +8778,24 @@ namespace core { interface baseCollectionEventData { tags(): Array } - type _sobBLXS = hook.Event - interface BootstrapEvent extends _sobBLXS { + type _soafXGE = hook.Event + interface BootstrapEvent extends _soafXGE { app: App } - type _skOAlQC = hook.Event - interface TerminateEvent extends _skOAlQC { + type _syTjWlg = hook.Event + interface TerminateEvent extends _syTjWlg { app: App isRestart: boolean } - type _sQHKmkX = hook.Event - interface BackupEvent extends _sQHKmkX { + type _sIrgSnO = hook.Event + interface BackupEvent extends _sIrgSnO { app: App context: context.Context name: string // the name of the backup to create/restore. exclude: Array // list of dir entries to exclude from the backup create/restore. 
} - type _sShqrkC = hook.Event - interface ServeEvent extends _sShqrkC { + type _srCYnWe = hook.Event + interface ServeEvent extends _srCYnWe { app: App router?: router.Router server?: http.Server @@ -8713,31 +8818,31 @@ namespace core { */ installerFunc: (app: App, systemSuperuser: Record, baseURL: string) => void } - type _sPuImKL = hook.Event&RequestEvent - interface SettingsListRequestEvent extends _sPuImKL { + type _sHVJEnN = hook.Event&RequestEvent + interface SettingsListRequestEvent extends _sHVJEnN { settings?: Settings } - type _sAbLBTU = hook.Event&RequestEvent - interface SettingsUpdateRequestEvent extends _sAbLBTU { + type _sjLQcYh = hook.Event&RequestEvent + interface SettingsUpdateRequestEvent extends _sjLQcYh { oldSettings?: Settings newSettings?: Settings } - type _sXofaGH = hook.Event - interface SettingsReloadEvent extends _sXofaGH { + type _scaLQhF = hook.Event + interface SettingsReloadEvent extends _scaLQhF { app: App } - type _svdtsfF = hook.Event - interface MailerEvent extends _svdtsfF { + type _shHJfMa = hook.Event + interface MailerEvent extends _shHJfMa { app: App mailer: mailer.Mailer message?: mailer.Message } - type _sKfduul = MailerEvent&baseRecordEventData - interface MailerRecordEvent extends _sKfduul { + type _sFAehvR = MailerEvent&baseRecordEventData + interface MailerRecordEvent extends _sFAehvR { meta: _TygojaDict } - type _sKVKErO = hook.Event&baseModelEventData - interface ModelEvent extends _sKVKErO { + type _sKRtAAS = hook.Event&baseModelEventData + interface ModelEvent extends _sKRtAAS { app: App context: context.Context /** @@ -8749,12 +8854,12 @@ namespace core { */ type: string } - type _sjHTuCk = ModelEvent - interface ModelErrorEvent extends _sjHTuCk { + type _sbFCvyL = ModelEvent + interface ModelErrorEvent extends _sbFCvyL { error: Error } - type _sySurGA = hook.Event&baseRecordEventData - interface RecordEvent extends _sySurGA { + type _sQzEEzu = hook.Event&baseRecordEventData + interface RecordEvent extends _sQzEEzu { app: App context: context.Context /** @@ -8766,12 +8871,12 @@ namespace core { */ type: string } - type _sAOZUbV = RecordEvent - interface RecordErrorEvent extends _sAOZUbV { + type _sHXqncT = RecordEvent + interface RecordErrorEvent extends _sHXqncT { error: Error } - type _ssJFgjA = hook.Event&baseCollectionEventData - interface CollectionEvent extends _ssJFgjA { + type _sWJvnfb = hook.Event&baseCollectionEventData + interface CollectionEvent extends _sWJvnfb { app: App context: context.Context /** @@ -8783,95 +8888,95 @@ namespace core { */ type: string } - type _sCSjbJu = CollectionEvent - interface CollectionErrorEvent extends _sCSjbJu { + type _svlTSZn = CollectionEvent + interface CollectionErrorEvent extends _svlTSZn { error: Error } - type _svrOoSl = hook.Event&RequestEvent&baseRecordEventData - interface FileTokenRequestEvent extends _svrOoSl { + type _sSViCTr = hook.Event&RequestEvent&baseRecordEventData + interface FileTokenRequestEvent extends _sSViCTr { token: string } - type _shFqQyX = hook.Event&RequestEvent&baseCollectionEventData - interface FileDownloadRequestEvent extends _shFqQyX { + type _sUCJiMm = hook.Event&RequestEvent&baseCollectionEventData + interface FileDownloadRequestEvent extends _sUCJiMm { record?: Record fileField?: FileField servedPath: string servedName: string } - type _sPAwJlM = hook.Event&RequestEvent - interface CollectionsListRequestEvent extends _sPAwJlM { + type _sePkyEK = hook.Event&RequestEvent + interface CollectionsListRequestEvent extends _sePkyEK { collections: 
Array<(Collection | undefined)> result?: search.Result } - type _sVYovuZ = hook.Event&RequestEvent - interface CollectionsImportRequestEvent extends _sVYovuZ { + type _shaTlcQ = hook.Event&RequestEvent + interface CollectionsImportRequestEvent extends _shaTlcQ { collectionsData: Array<_TygojaDict> deleteMissing: boolean } - type _sMhLnqr = hook.Event&RequestEvent&baseCollectionEventData - interface CollectionRequestEvent extends _sMhLnqr { + type _sxHnQot = hook.Event&RequestEvent&baseCollectionEventData + interface CollectionRequestEvent extends _sxHnQot { } - type _sbABKlN = hook.Event&RequestEvent - interface RealtimeConnectRequestEvent extends _sbABKlN { + type _sNjmWcZ = hook.Event&RequestEvent + interface RealtimeConnectRequestEvent extends _sNjmWcZ { client: subscriptions.Client /** * note: modifying it after the connect has no effect */ idleTimeout: time.Duration } - type _sHtWqZu = hook.Event&RequestEvent - interface RealtimeMessageEvent extends _sHtWqZu { + type _slmLyoh = hook.Event&RequestEvent + interface RealtimeMessageEvent extends _slmLyoh { client: subscriptions.Client message?: subscriptions.Message } - type _ssIPDrl = hook.Event&RequestEvent - interface RealtimeSubscribeRequestEvent extends _ssIPDrl { + type _sulAzJb = hook.Event&RequestEvent + interface RealtimeSubscribeRequestEvent extends _sulAzJb { client: subscriptions.Client subscriptions: Array } - type _sjPsfsj = hook.Event&RequestEvent&baseCollectionEventData - interface RecordsListRequestEvent extends _sjPsfsj { + type _sXigLxV = hook.Event&RequestEvent&baseCollectionEventData + interface RecordsListRequestEvent extends _sXigLxV { /** * @todo consider removing and maybe add as generic to the search.Result? */ records: Array<(Record | undefined)> result?: search.Result } - type _sVQdwlx = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEvent extends _sVQdwlx { + type _sqQzOXg = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEvent extends _sqQzOXg { record?: Record } - type _smcNWnr = hook.Event&baseRecordEventData - interface RecordEnrichEvent extends _smcNWnr { + type _stotPGK = hook.Event&baseRecordEventData + interface RecordEnrichEvent extends _stotPGK { app: App requestInfo?: RequestInfo } - type _svwNaom = hook.Event&RequestEvent&baseCollectionEventData - interface RecordCreateOTPRequestEvent extends _svwNaom { + type _sDMdwoi = hook.Event&RequestEvent&baseCollectionEventData + interface RecordCreateOTPRequestEvent extends _sDMdwoi { record?: Record password: string } - type _sRSaRpF = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOTPRequestEvent extends _sRSaRpF { + type _ssLtNCP = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOTPRequestEvent extends _ssLtNCP { record?: Record otp?: OTP } - type _sAvcHtH = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRequestEvent extends _sAvcHtH { + type _stTWsoG = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRequestEvent extends _stTWsoG { record?: Record token: string meta: any authMethod: string } - type _sYBrhxG = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithPasswordRequestEvent extends _sYBrhxG { + type _spQHqvF = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithPasswordRequestEvent extends _spQHqvF { record?: Record identity: string identityField: string password: string } - type _sADRGLW = hook.Event&RequestEvent&baseCollectionEventData - interface 
RecordAuthWithOAuth2RequestEvent extends _sADRGLW { + type _sGsEaIG = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOAuth2RequestEvent extends _sGsEaIG { providerName: string providerClient: auth.Provider record?: Record @@ -8879,41 +8984,41 @@ namespace core { createData: _TygojaDict isNewRecord: boolean } - type _sGtuWlW = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRefreshRequestEvent extends _sGtuWlW { + type _sWbiWdh = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRefreshRequestEvent extends _sWbiWdh { record?: Record } - type _sQdJutQ = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestPasswordResetRequestEvent extends _sQdJutQ { + type _sNOIpAU = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestPasswordResetRequestEvent extends _sNOIpAU { record?: Record } - type _sldZEyd = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmPasswordResetRequestEvent extends _sldZEyd { + type _sKWCZSr = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmPasswordResetRequestEvent extends _sKWCZSr { record?: Record } - type _sSTIrHs = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestVerificationRequestEvent extends _sSTIrHs { + type _snPNiOG = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestVerificationRequestEvent extends _snPNiOG { record?: Record } - type _stvIucE = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmVerificationRequestEvent extends _stvIucE { + type _sedqfoM = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmVerificationRequestEvent extends _sedqfoM { record?: Record } - type _soFGzmQ = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEmailChangeRequestEvent extends _soFGzmQ { + type _sRCMXOd = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEmailChangeRequestEvent extends _sRCMXOd { record?: Record newEmail: string } - type _sgsKPlc = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmEmailChangeRequestEvent extends _sgsKPlc { + type _sLWbFDY = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmEmailChangeRequestEvent extends _sLWbFDY { record?: Record newEmail: string } /** * ExternalAuth defines a Record proxy for working with the externalAuths collection. */ - type _siNGdZi = Record - interface ExternalAuth extends _siNGdZi { + type _sVkvoBX = Record + interface ExternalAuth extends _sVkvoBX { } interface newExternalAuth { /** @@ -11375,8 +11480,8 @@ namespace core { interface onlyFieldType { type: string } - type _saCtIGV = Field - interface fieldWithType extends _saCtIGV { + type _sxxdCjA = Field + interface fieldWithType extends _sxxdCjA { type: string } interface fieldWithType { @@ -11408,8 +11513,8 @@ namespace core { */ scan(value: any): void } - type _sURHjWo = BaseModel - interface Log extends _sURHjWo { + type _sTNwqVq = BaseModel + interface Log extends _sTNwqVq { created: types.DateTime data: types.JSONMap message: string @@ -11455,8 +11560,8 @@ namespace core { /** * MFA defines a Record proxy for working with the mfas collection. */ - type _sqTUOiJ = Record - interface MFA extends _sqTUOiJ { + type _sRouWCC = Record + interface MFA extends _sRouWCC { } interface newMFA { /** @@ -11678,8 +11783,8 @@ namespace core { /** * OTP defines a Record proxy for working with the otps collection. 
*/ - type _sQAQWqh = Record - interface OTP extends _sQAQWqh { + type _svKxBff = Record + interface OTP extends _svKxBff { } interface newOTP { /** @@ -11915,8 +12020,8 @@ namespace core { } interface runner { } - type _syfBnAy = BaseModel - interface Record extends _syfBnAy { + type _shqsgwn = BaseModel + interface Record extends _shqsgwn { } interface newRecord { /** @@ -12391,8 +12496,8 @@ namespace core { * BaseRecordProxy implements the [RecordProxy] interface and it is intended * to be used as embed to custom user provided Record proxy structs. */ - type _sXurpLQ = Record - interface BaseRecordProxy extends _sXurpLQ { + type _sJFEEpH = Record + interface BaseRecordProxy extends _sJFEEpH { } interface BaseRecordProxy { /** @@ -12641,8 +12746,8 @@ namespace core { /** * Settings defines the PocketBase app settings. */ - type _sejdoQs = settings - interface Settings extends _sejdoQs { + type _sgJMkoA = settings + interface Settings extends _sgJMkoA { } interface Settings { /** @@ -12943,8 +13048,8 @@ namespace core { */ durationTime(): time.Duration } - type _sIvFrcK = BaseModel - interface Param extends _sIvFrcK { + type _sBhlqLz = BaseModel + interface Param extends _sBhlqLz { created: types.DateTime updated: types.DateTime value: types.JSONRaw @@ -13230,111 +13335,6 @@ namespace forms { } } -/** - * Package template is a thin wrapper around the standard html/template - * and text/template packages that implements a convenient registry to - * load and cache templates on the fly concurrently. - * - * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. - * - * Example: - * - * ``` - * registry := template.NewRegistry() - * - * html1, err := registry.LoadFiles( - * // the files set wil be parsed only once and then cached - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "John"}) - * - * html2, err := registry.LoadFiles( - * // reuse the already parsed and cached files set - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "Jane"}) - * ``` - */ -namespace template { - interface newRegistry { - /** - * NewRegistry creates and initializes a new templates registry with - * some defaults (eg. global "raw" template function for unescaped HTML). - * - * Use the Registry.Load* methods to load templates into the registry. - */ - (): (Registry) - } - /** - * Registry defines a templates registry that is safe to be used by multiple goroutines. - * - * Use the Registry.Load* methods to load templates into the registry. - */ - interface Registry { - } - interface Registry { - /** - * AddFuncs registers new global template functions. - * - * The key of each map entry is the function name that will be used in the templates. - * If a function with the map entry name already exists it will be replaced with the new one. - * - * The value of each map entry is a function that must have either a - * single return value, or two return values of which the second has type error. - * - * Example: - * - * ``` - * r.AddFuncs(map[string]any{ - * "toUpper": func(str string) string { - * return strings.ToUppser(str) - * }, - * ... - * }) - * ``` - */ - addFuncs(funcs: _TygojaDict): (Registry) - } - interface Registry { - /** - * LoadFiles caches (if not already) the specified filenames set as a - * single template and returns a ready to use Renderer instance. - * - * There must be at least 1 filename specified. 
- */ - loadFiles(...filenames: string[]): (Renderer) - } - interface Registry { - /** - * LoadString caches (if not already) the specified inline string as a - * single template and returns a ready to use Renderer instance. - */ - loadString(text: string): (Renderer) - } - interface Registry { - /** - * LoadFS caches (if not already) the specified fs and globPatterns - * pair as single template and returns a ready to use Renderer instance. - * - * There must be at least 1 file matching the provided globPattern(s) - * (note that most file names serves as glob patterns matching themselves). - */ - loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) - } - /** - * Renderer defines a single parsed template. - */ - interface Renderer { - } - interface Renderer { - /** - * Render executes the template with the specified data as the dot object - * and returns the result as plain string. - */ - render(data: any): string - } -} - namespace apis { interface toApiError { /** @@ -13563,8 +13563,8 @@ namespace apis { */ (limitBytes: number): (hook.Handler) } - type _sIhUBGR = io.ReadCloser - interface limitedReader extends _sIhUBGR { + type _sozlRnQ = io.ReadCloser + interface limitedReader extends _sozlRnQ { } interface limitedReader { read(b: string|Array): number @@ -13715,8 +13715,8 @@ namespace apis { */ (config: GzipConfig): (hook.Handler) } - type _sBYfreL = http.ResponseWriter&io.Writer - interface gzipResponseWriter extends _sBYfreL { + type _sOuNOQE = http.ResponseWriter&io.Writer + interface gzipResponseWriter extends _sOuNOQE { } interface gzipResponseWriter { writeHeader(code: number): void @@ -13736,11 +13736,11 @@ namespace apis { interface gzipResponseWriter { unwrap(): http.ResponseWriter } - type _srkUvIO = sync.RWMutex - interface rateLimiter extends _srkUvIO { + type _sJPgwCd = sync.RWMutex + interface rateLimiter extends _sJPgwCd { } - type _sDSVbeS = sync.Mutex - interface fixedWindow extends _sDSVbeS { + type _sqviHKG = sync.Mutex + interface fixedWindow extends _sqviHKG { } interface realtimeSubscribeForm { clientId: string @@ -13981,8 +13981,8 @@ namespace pocketbase { * It implements [CoreApp] via embedding and all of the app interface methods * could be accessed directly through the instance (eg. PocketBase.DataDir()). */ - type _sAbdwRD = CoreApp - interface PocketBase extends _sAbdwRD { + type _slAyzAf = CoreApp + interface PocketBase extends _slAyzAf { /** * RootCmd is the main console command */ @@ -14214,6 +14214,169 @@ namespace sync { } } +/** + * Package io provides basic interfaces to I/O primitives. + * Its primary job is to wrap existing implementations of such primitives, + * such as those in package os, into shared public interfaces that + * abstract the functionality, plus some other related primitives. + * + * Because these interfaces and primitives wrap lower-level operations with + * various implementations, unless otherwise informed clients should not + * assume they are safe for parallel execution. + */ +namespace io { + /** + * Reader is the interface that wraps the basic Read method. + * + * Read reads up to len(p) bytes into p. It returns the number of bytes + * read (0 <= n <= len(p)) and any error encountered. Even if Read + * returns n < len(p), it may use all of p as scratch space during the call. + * If some data is available but not len(p) bytes, Read conventionally + * returns what is available instead of waiting for more. 
+ * + * When Read encounters an error or end-of-file condition after + * successfully reading n > 0 bytes, it returns the number of + * bytes read. It may return the (non-nil) error from the same call + * or return the error (and n == 0) from a subsequent call. + * An instance of this general case is that a Reader returning + * a non-zero number of bytes at the end of the input stream may + * return either err == EOF or err == nil. The next Read should + * return 0, EOF. + * + * Callers should always process the n > 0 bytes returned before + * considering the error err. Doing so correctly handles I/O errors + * that happen after reading some bytes and also both of the + * allowed EOF behaviors. + * + * If len(p) == 0, Read should always return n == 0. It may return a + * non-nil error if some error condition is known, such as EOF. + * + * Implementations of Read are discouraged from returning a + * zero byte count with a nil error, except when len(p) == 0. + * Callers should treat a return of 0 and nil as indicating that + * nothing happened; in particular it does not indicate EOF. + * + * Implementations must not retain p. + */ + interface Reader { + [key:string]: any; + read(p: string|Array): number + } + /** + * Writer is the interface that wraps the basic Write method. + * + * Write writes len(p) bytes from p to the underlying data stream. + * It returns the number of bytes written from p (0 <= n <= len(p)) + * and any error encountered that caused the write to stop early. + * Write must return a non-nil error if it returns n < len(p). + * Write must not modify the slice data, even temporarily. + * + * Implementations must not retain p. + */ + interface Writer { + [key:string]: any; + write(p: string|Array): number + } + /** + * ReadCloser is the interface that groups the basic Read and Close methods. + */ + interface ReadCloser { + [key:string]: any; + } + /** + * ReadSeekCloser is the interface that groups the basic Read, Seek and Close + * methods. + */ + interface ReadSeekCloser { + [key:string]: any; + } +} + +/** + * Package bytes implements functions for the manipulation of byte slices. + * It is analogous to the facilities of the [strings] package. + */ +namespace bytes { + /** + * A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker], + * [io.ByteScanner], and [io.RuneScanner] interfaces by reading from + * a byte slice. + * Unlike a [Buffer], a Reader is read-only and supports seeking. + * The zero value for Reader operates like a Reader of an empty slice. + */ + interface Reader { + } + interface Reader { + /** + * Len returns the number of bytes of the unread portion of the + * slice. + */ + len(): number + } + interface Reader { + /** + * Size returns the original length of the underlying byte slice. + * Size is the number of bytes available for reading via [Reader.ReadAt]. + * The result is unaffected by any method calls except [Reader.Reset]. + */ + size(): number + } + interface Reader { + /** + * Read implements the [io.Reader] interface. + */ + read(b: string|Array): number + } + interface Reader { + /** + * ReadAt implements the [io.ReaderAt] interface. + */ + readAt(b: string|Array, off: number): number + } + interface Reader { + /** + * ReadByte implements the [io.ByteReader] interface. + */ + readByte(): number + } + interface Reader { + /** + * UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface. 
+ */ + unreadByte(): void + } + interface Reader { + /** + * ReadRune implements the [io.RuneReader] interface. + */ + readRune(): [number, number] + } + interface Reader { + /** + * UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface. + */ + unreadRune(): void + } + interface Reader { + /** + * Seek implements the [io.Seeker] interface. + */ + seek(offset: number, whence: number): number + } + interface Reader { + /** + * WriteTo implements the [io.WriterTo] interface. + */ + writeTo(w: io.Writer): number + } + interface Reader { + /** + * Reset resets the [Reader] to be reading from b. + */ + reset(b: string|Array): void + } +} + /** * Package syscall contains an interface to the low-level operating system * primitives. The details vary depending on the underlying system, and @@ -14947,247 +15110,6 @@ namespace time { } } -/** - * Package context defines the Context type, which carries deadlines, - * cancellation signals, and other request-scoped values across API boundaries - * and between processes. - * - * Incoming requests to a server should create a [Context], and outgoing - * calls to servers should accept a Context. The chain of function - * calls between them must propagate the Context, optionally replacing - * it with a derived Context created using [WithCancel], [WithDeadline], - * [WithTimeout], or [WithValue]. When a Context is canceled, all - * Contexts derived from it are also canceled. - * - * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a - * Context (the parent) and return a derived Context (the child) and a - * [CancelFunc]. Calling the CancelFunc cancels the child and its - * children, removes the parent's reference to the child, and stops - * any associated timers. Failing to call the CancelFunc leaks the - * child and its children until the parent is canceled or the timer - * fires. The go vet tool checks that CancelFuncs are used on all - * control-flow paths. - * - * The [WithCancelCause] function returns a [CancelCauseFunc], which - * takes an error and records it as the cancellation cause. Calling - * [Cause] on the canceled context or any of its children retrieves - * the cause. If no cause is specified, Cause(ctx) returns the same - * value as ctx.Err(). - * - * Programs that use Contexts should follow these rules to keep interfaces - * consistent across packages and enable static analysis tools to check context - * propagation: - * - * Do not store Contexts inside a struct type; instead, pass a Context - * explicitly to each function that needs it. The Context should be the first - * parameter, typically named ctx: - * - * ``` - * func DoSomething(ctx context.Context, arg Arg) error { - * // ... use ctx ... - * } - * ``` - * - * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] - * if you are unsure about which Context to use. - * - * Use context Values only for request-scoped data that transits processes and - * APIs, not for passing optional parameters to functions. - * - * The same Context may be passed to functions running in different goroutines; - * Contexts are safe for simultaneous use by multiple goroutines. - * - * See https://blog.golang.org/context for example code for a server that uses - * Contexts. - */ -namespace context { - /** - * A Context carries a deadline, a cancellation signal, and other values across - * API boundaries. - * - * Context's methods may be called by multiple goroutines simultaneously. 
- */ - interface Context { - [key:string]: any; - /** - * Deadline returns the time when work done on behalf of this context - * should be canceled. Deadline returns ok==false when no deadline is - * set. Successive calls to Deadline return the same results. - */ - deadline(): [time.Time, boolean] - /** - * Done returns a channel that's closed when work done on behalf of this - * context should be canceled. Done may return nil if this context can - * never be canceled. Successive calls to Done return the same value. - * The close of the Done channel may happen asynchronously, - * after the cancel function returns. - * - * WithCancel arranges for Done to be closed when cancel is called; - * WithDeadline arranges for Done to be closed when the deadline - * expires; WithTimeout arranges for Done to be closed when the timeout - * elapses. - * - * Done is provided for use in select statements: - * - * // Stream generates values with DoSomething and sends them to out - * // until DoSomething returns an error or ctx.Done is closed. - * func Stream(ctx context.Context, out chan<- Value) error { - * for { - * v, err := DoSomething(ctx) - * if err != nil { - * return err - * } - * select { - * case <-ctx.Done(): - * return ctx.Err() - * case out <- v: - * } - * } - * } - * - * See https://blog.golang.org/pipelines for more examples of how to use - * a Done channel for cancellation. - */ - done(): undefined - /** - * If Done is not yet closed, Err returns nil. - * If Done is closed, Err returns a non-nil error explaining why: - * Canceled if the context was canceled - * or DeadlineExceeded if the context's deadline passed. - * After Err returns a non-nil error, successive calls to Err return the same error. - */ - err(): void - /** - * Value returns the value associated with this context for key, or nil - * if no value is associated with key. Successive calls to Value with - * the same key returns the same result. - * - * Use context values only for request-scoped data that transits - * processes and API boundaries, not for passing optional parameters to - * functions. - * - * A key identifies a specific value in a Context. Functions that wish - * to store values in Context typically allocate a key in a global - * variable then use that key as the argument to context.WithValue and - * Context.Value. A key can be any type that supports equality; - * packages should define keys as an unexported type to avoid - * collisions. - * - * Packages that define a Context key should provide type-safe accessors - * for the values stored using that key: - * - * ``` - * // Package user defines a User type that's stored in Contexts. - * package user - * - * import "context" - * - * // User is the type of value stored in the Contexts. - * type User struct {...} - * - * // key is an unexported type for keys defined in this package. - * // This prevents collisions with keys defined in other packages. - * type key int - * - * // userKey is the key for user.User values in Contexts. It is - * // unexported; clients use user.NewContext and user.FromContext - * // instead of using this key directly. - * var userKey key - * - * // NewContext returns a new Context that carries value u. - * func NewContext(ctx context.Context, u *User) context.Context { - * return context.WithValue(ctx, userKey, u) - * } - * - * // FromContext returns the User value stored in ctx, if any. 
- * func FromContext(ctx context.Context) (*User, bool) { - * u, ok := ctx.Value(userKey).(*User) - * return u, ok - * } - * ``` - */ - value(key: any): any - } -} - -/** - * Package io provides basic interfaces to I/O primitives. - * Its primary job is to wrap existing implementations of such primitives, - * such as those in package os, into shared public interfaces that - * abstract the functionality, plus some other related primitives. - * - * Because these interfaces and primitives wrap lower-level operations with - * various implementations, unless otherwise informed clients should not - * assume they are safe for parallel execution. - */ -namespace io { - /** - * Reader is the interface that wraps the basic Read method. - * - * Read reads up to len(p) bytes into p. It returns the number of bytes - * read (0 <= n <= len(p)) and any error encountered. Even if Read - * returns n < len(p), it may use all of p as scratch space during the call. - * If some data is available but not len(p) bytes, Read conventionally - * returns what is available instead of waiting for more. - * - * When Read encounters an error or end-of-file condition after - * successfully reading n > 0 bytes, it returns the number of - * bytes read. It may return the (non-nil) error from the same call - * or return the error (and n == 0) from a subsequent call. - * An instance of this general case is that a Reader returning - * a non-zero number of bytes at the end of the input stream may - * return either err == EOF or err == nil. The next Read should - * return 0, EOF. - * - * Callers should always process the n > 0 bytes returned before - * considering the error err. Doing so correctly handles I/O errors - * that happen after reading some bytes and also both of the - * allowed EOF behaviors. - * - * If len(p) == 0, Read should always return n == 0. It may return a - * non-nil error if some error condition is known, such as EOF. - * - * Implementations of Read are discouraged from returning a - * zero byte count with a nil error, except when len(p) == 0. - * Callers should treat a return of 0 and nil as indicating that - * nothing happened; in particular it does not indicate EOF. - * - * Implementations must not retain p. - */ - interface Reader { - [key:string]: any; - read(p: string|Array): number - } - /** - * Writer is the interface that wraps the basic Write method. - * - * Write writes len(p) bytes from p to the underlying data stream. - * It returns the number of bytes written from p (0 <= n <= len(p)) - * and any error encountered that caused the write to stop early. - * Write must return a non-nil error if it returns n < len(p). - * Write must not modify the slice data, even temporarily. - * - * Implementations must not retain p. - */ - interface Writer { - [key:string]: any; - write(p: string|Array): number - } - /** - * ReadCloser is the interface that groups the basic Read and Close methods. - */ - interface ReadCloser { - [key:string]: any; - } - /** - * ReadSeekCloser is the interface that groups the basic Read, Seek and Close - * methods. - */ - interface ReadSeekCloser { - [key:string]: any; - } -} - /** * Package fs defines basic interfaces to a file system. * A file system can be provided by the host operating system @@ -15389,87 +15311,840 @@ namespace fs { } /** - * Package bytes implements functions for the manipulation of byte slices. - * It is analogous to the facilities of the [strings] package. 
+ * Package context defines the Context type, which carries deadlines, + * cancellation signals, and other request-scoped values across API boundaries + * and between processes. + * + * Incoming requests to a server should create a [Context], and outgoing + * calls to servers should accept a Context. The chain of function + * calls between them must propagate the Context, optionally replacing + * it with a derived Context created using [WithCancel], [WithDeadline], + * [WithTimeout], or [WithValue]. When a Context is canceled, all + * Contexts derived from it are also canceled. + * + * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a + * Context (the parent) and return a derived Context (the child) and a + * [CancelFunc]. Calling the CancelFunc cancels the child and its + * children, removes the parent's reference to the child, and stops + * any associated timers. Failing to call the CancelFunc leaks the + * child and its children until the parent is canceled or the timer + * fires. The go vet tool checks that CancelFuncs are used on all + * control-flow paths. + * + * The [WithCancelCause] function returns a [CancelCauseFunc], which + * takes an error and records it as the cancellation cause. Calling + * [Cause] on the canceled context or any of its children retrieves + * the cause. If no cause is specified, Cause(ctx) returns the same + * value as ctx.Err(). + * + * Programs that use Contexts should follow these rules to keep interfaces + * consistent across packages and enable static analysis tools to check context + * propagation: + * + * Do not store Contexts inside a struct type; instead, pass a Context + * explicitly to each function that needs it. The Context should be the first + * parameter, typically named ctx: + * + * ``` + * func DoSomething(ctx context.Context, arg Arg) error { + * // ... use ctx ... + * } + * ``` + * + * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] + * if you are unsure about which Context to use. + * + * Use context Values only for request-scoped data that transits processes and + * APIs, not for passing optional parameters to functions. + * + * The same Context may be passed to functions running in different goroutines; + * Contexts are safe for simultaneous use by multiple goroutines. + * + * See https://blog.golang.org/context for example code for a server that uses + * Contexts. */ -namespace bytes { +namespace context { /** - * A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker], - * [io.ByteScanner], and [io.RuneScanner] interfaces by reading from - * a byte slice. - * Unlike a [Buffer], a Reader is read-only and supports seeking. - * The zero value for Reader operates like a Reader of an empty slice. + * A Context carries a deadline, a cancellation signal, and other values across + * API boundaries. + * + * Context's methods may be called by multiple goroutines simultaneously. */ - interface Reader { - } - interface Reader { + interface Context { + [key:string]: any; /** - * Len returns the number of bytes of the unread portion of the - * slice. + * Deadline returns the time when work done on behalf of this context + * should be canceled. Deadline returns ok==false when no deadline is + * set. Successive calls to Deadline return the same results. */ - len(): number - } - interface Reader { + deadline(): [time.Time, boolean] /** - * Size returns the original length of the underlying byte slice. - * Size is the number of bytes available for reading via [Reader.ReadAt]. 
- * The result is unaffected by any method calls except [Reader.Reset]. + * Done returns a channel that's closed when work done on behalf of this + * context should be canceled. Done may return nil if this context can + * never be canceled. Successive calls to Done return the same value. + * The close of the Done channel may happen asynchronously, + * after the cancel function returns. + * + * WithCancel arranges for Done to be closed when cancel is called; + * WithDeadline arranges for Done to be closed when the deadline + * expires; WithTimeout arranges for Done to be closed when the timeout + * elapses. + * + * Done is provided for use in select statements: + * + * // Stream generates values with DoSomething and sends them to out + * // until DoSomething returns an error or ctx.Done is closed. + * func Stream(ctx context.Context, out chan<- Value) error { + * for { + * v, err := DoSomething(ctx) + * if err != nil { + * return err + * } + * select { + * case <-ctx.Done(): + * return ctx.Err() + * case out <- v: + * } + * } + * } + * + * See https://blog.golang.org/pipelines for more examples of how to use + * a Done channel for cancellation. */ - size(): number - } - interface Reader { + done(): undefined /** - * Read implements the [io.Reader] interface. + * If Done is not yet closed, Err returns nil. + * If Done is closed, Err returns a non-nil error explaining why: + * Canceled if the context was canceled + * or DeadlineExceeded if the context's deadline passed. + * After Err returns a non-nil error, successive calls to Err return the same error. */ - read(b: string|Array): number - } - interface Reader { + err(): void /** - * ReadAt implements the [io.ReaderAt] interface. + * Value returns the value associated with this context for key, or nil + * if no value is associated with key. Successive calls to Value with + * the same key returns the same result. + * + * Use context values only for request-scoped data that transits + * processes and API boundaries, not for passing optional parameters to + * functions. + * + * A key identifies a specific value in a Context. Functions that wish + * to store values in Context typically allocate a key in a global + * variable then use that key as the argument to context.WithValue and + * Context.Value. A key can be any type that supports equality; + * packages should define keys as an unexported type to avoid + * collisions. + * + * Packages that define a Context key should provide type-safe accessors + * for the values stored using that key: + * + * ``` + * // Package user defines a User type that's stored in Contexts. + * package user + * + * import "context" + * + * // User is the type of value stored in the Contexts. + * type User struct {...} + * + * // key is an unexported type for keys defined in this package. + * // This prevents collisions with keys defined in other packages. + * type key int + * + * // userKey is the key for user.User values in Contexts. It is + * // unexported; clients use user.NewContext and user.FromContext + * // instead of using this key directly. + * var userKey key + * + * // NewContext returns a new Context that carries value u. + * func NewContext(ctx context.Context, u *User) context.Context { + * return context.WithValue(ctx, userKey, u) + * } + * + * // FromContext returns the User value stored in ctx, if any. 
+ * func FromContext(ctx context.Context) (*User, bool) { + * u, ok := ctx.Value(userKey).(*User) + * return u, ok + * } + * ``` */ - readAt(b: string|Array, off: number): number + value(key: any): any } - interface Reader { +} + +/** + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. + * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. + */ +namespace sql { + /** + * TxOptions holds the transaction options to be used in [DB.BeginTx]. + */ + interface TxOptions { /** - * ReadByte implements the [io.ByteReader] interface. + * Isolation is the transaction isolation level. + * If zero, the driver or database's default level is used. */ - readByte(): number + isolation: IsolationLevel + readOnly: boolean } - interface Reader { - /** - * UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface. - */ - unreadByte(): void + /** + * NullString represents a string that may be null. + * NullString implements the [Scanner] interface so + * it can be used as a scan destination: + * + * ``` + * var s NullString + * err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s) + * ... + * if s.Valid { + * // use s.String + * } else { + * // NULL value + * } + * ``` + */ + interface NullString { + string: string + valid: boolean // Valid is true if String is not NULL } - interface Reader { + interface NullString { /** - * ReadRune implements the [io.RuneReader] interface. + * Scan implements the [Scanner] interface. */ - readRune(): [number, number] + scan(value: any): void } - interface Reader { + interface NullString { /** - * UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface. + * Value implements the [driver.Valuer] interface. */ - unreadRune(): void + value(): any } - interface Reader { - /** - * Seek implements the [io.Seeker] interface. - */ - seek(offset: number, whence: number): number + /** + * DB is a database handle representing a pool of zero or more + * underlying connections. It's safe for concurrent use by multiple + * goroutines. + * + * The sql package creates and frees connections automatically; it + * also maintains a free pool of idle connections. If the database has + * a concept of per-connection state, such state can be reliably observed + * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the + * returned [Tx] is bound to a single connection. Once [Tx.Commit] or + * [Tx.Rollback] is called on the transaction, that transaction's + * connection is returned to [DB]'s idle connection pool. The pool size + * can be controlled with [DB.SetMaxIdleConns]. + */ + interface DB { } - interface Reader { + interface DB { /** - * WriteTo implements the [io.WriterTo] interface. + * PingContext verifies a connection to the database is still alive, + * establishing a connection if necessary. */ - writeTo(w: io.Writer): number + pingContext(ctx: context.Context): void } - interface Reader { + interface DB { /** - * Reset resets the [Reader] to be reading from b. + * Ping verifies a connection to the database is still alive, + * establishing a connection if necessary. + * + * Ping uses [context.Background] internally; to specify the context, use + * [DB.PingContext]. 
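An editorial sketch of consuming the Context interface added above. The ctx value, the "requestId" key and the console binding are assumptions, and the `as any` cast only loosens the generator's void return type for err().

```ts
// Sketch: read-only inspection of a context.Context handed in by the host.
function describeContext(ctx: context.Context): void {
    const [deadline, hasDeadline] = ctx.deadline();
    if (hasDeadline) {
        console.log("deadline:", deadline); // a time.Time value
    }

    // err() is empty while the context is alive and reports Canceled or
    // DeadlineExceeded once its Done channel has been closed.
    const err = ctx.err() as any;
    if (err) {
        console.log("context finished:", err);
    }

    // Request-scoped lookup; the "requestId" key is purely illustrative.
    console.log("request id:", ctx.value("requestId"));
}
```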
*/ - reset(b: string|Array): void + ping(): void + } + interface DB { + /** + * Close closes the database and prevents new queries from starting. + * Close then waits for all queries that have started processing on the server + * to finish. + * + * It is rare to Close a [DB], as the [DB] handle is meant to be + * long-lived and shared between many goroutines. + */ + close(): void + } + interface DB { + /** + * SetMaxIdleConns sets the maximum number of connections in the idle + * connection pool. + * + * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, + * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit. + * + * If n <= 0, no idle connections are retained. + * + * The default max idle connections is currently 2. This may change in + * a future release. + */ + setMaxIdleConns(n: number): void + } + interface DB { + /** + * SetMaxOpenConns sets the maximum number of open connections to the database. + * + * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than + * MaxIdleConns, then MaxIdleConns will be reduced to match the new + * MaxOpenConns limit. + * + * If n <= 0, then there is no limit on the number of open connections. + * The default is 0 (unlimited). + */ + setMaxOpenConns(n: number): void + } + interface DB { + /** + * SetConnMaxLifetime sets the maximum amount of time a connection may be reused. + * + * Expired connections may be closed lazily before reuse. + * + * If d <= 0, connections are not closed due to a connection's age. + */ + setConnMaxLifetime(d: time.Duration): void + } + interface DB { + /** + * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle. + * + * Expired connections may be closed lazily before reuse. + * + * If d <= 0, connections are not closed due to a connection's idle time. + */ + setConnMaxIdleTime(d: time.Duration): void + } + interface DB { + /** + * Stats returns database statistics. + */ + stats(): DBStats + } + interface DB { + /** + * PrepareContext creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface DB { + /** + * Prepare creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * Prepare uses [context.Background] internally; to specify the context, use + * [DB.PrepareContext]. + */ + prepare(query: string): (Stmt) + } + interface DB { + /** + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface DB { + /** + * Exec executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + * + * Exec uses [context.Background] internally; to specify the context, use + * [DB.ExecContext]. + */ + exec(query: string, ...args: any[]): Result + } + interface DB { + /** + * QueryContext executes a query that returns rows, typically a SELECT. 
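A hedged sketch of the pool knobs and Exec described above; the db handle, the sessions table and the "?" placeholder style (which depends on the driver) are assumptions.

```ts
// Sketch: configure the connection pool and run a row-less statement.
function tunePoolAndCleanup(db: sql.DB): number {
    db.setMaxOpenConns(10); // hard cap on simultaneously open connections
    db.setMaxIdleConns(5);  // keep a few idle connections warm

    // exec() returns a Result; the "?" placeholder style is driver specific.
    const res = db.exec("DELETE FROM sessions WHERE expired = ?", 1);
    return res.rowsAffected();
}
```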
+ * The args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface DB { + /** + * Query executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + * + * Query uses [context.Background] internally; to specify the context, use + * [DB.QueryContext]. + */ + query(query: string, ...args: any[]): (Rows) + } + interface DB { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface DB { + /** + * QueryRow executes a query that is expected to return at most one row. + * QueryRow always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, [*Row.Scan] scans the first selected row and discards + * the rest. + * + * QueryRow uses [context.Background] internally; to specify the context, use + * [DB.QueryRowContext]. + */ + queryRow(query: string, ...args: any[]): (Row) + } + interface DB { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface DB { + /** + * Begin starts a transaction. The default isolation level is dependent on + * the driver. + * + * Begin uses [context.Background] internally; to specify the context, use + * [DB.BeginTx]. + */ + begin(): (Tx) + } + interface DB { + /** + * Driver returns the database's underlying driver. + */ + driver(): any + } + interface DB { + /** + * Conn returns a single connection by either opening a new connection + * or returning an existing connection from the connection pool. Conn will + * block until either a connection is returned or ctx is canceled. + * Queries run on the same Conn will be run in the same database session. + * + * Every Conn must be returned to the database pool after use by + * calling [Conn.Close]. + */ + conn(ctx: context.Context): (Conn) + } + /** + * Tx is an in-progress database transaction. + * + * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback]. + * + * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the + * transaction fail with [ErrTxDone]. + * + * The statements prepared for a transaction by calling + * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed + * by the call to [Tx.Commit] or [Tx.Rollback]. + */ + interface Tx { + } + interface Tx { + /** + * Commit commits the transaction. + */ + commit(): void + } + interface Tx { + /** + * Rollback aborts the transaction. + */ + rollback(): void + } + interface Tx { + /** + * PrepareContext creates a prepared statement for use within a transaction. 
+ * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * To use an existing prepared statement on this transaction, see [Tx.Stmt]. + * + * The provided context will be used for the preparation of the context, not + * for the execution of the returned statement. The returned statement + * will run in the transaction context. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Tx { + /** + * Prepare creates a prepared statement for use within a transaction. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * To use an existing prepared statement on this transaction, see [Tx.Stmt]. + * + * Prepare uses [context.Background] internally; to specify the context, use + * [Tx.PrepareContext]. + */ + prepare(query: string): (Stmt) + } + interface Tx { + /** + * StmtContext returns a transaction-specific prepared statement from + * an existing statement. + * + * Example: + * + * ``` + * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") + * ... + * tx, err := db.Begin() + * ... + * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203) + * ``` + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + */ + stmtContext(ctx: context.Context, stmt: Stmt): (Stmt) + } + interface Tx { + /** + * Stmt returns a transaction-specific prepared statement from + * an existing statement. + * + * Example: + * + * ``` + * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") + * ... + * tx, err := db.Begin() + * ... + * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203) + * ``` + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * Stmt uses [context.Background] internally; to specify the context, use + * [Tx.StmtContext]. + */ + stmt(stmt: Stmt): (Stmt) + } + interface Tx { + /** + * ExecContext executes a query that doesn't return rows. + * For example: an INSERT and UPDATE. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Tx { + /** + * Exec executes a query that doesn't return rows. + * For example: an INSERT and UPDATE. + * + * Exec uses [context.Background] internally; to specify the context, use + * [Tx.ExecContext]. + */ + exec(query: string, ...args: any[]): Result + } + interface Tx { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Tx { + /** + * Query executes a query that returns rows, typically a SELECT. + * + * Query uses [context.Background] internally; to specify the context, use + * [Tx.QueryContext]. + */ + query(query: string, ...args: any[]): (Rows) + } + interface Tx { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. 
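A sketch of the Tx lifecycle documented above: begin, run statements, then commit or roll back. The accounts table, the "?" placeholders and the assumption that Go errors surface as catchable JS exceptions are illustrative.

```ts
// Sketch: begin / exec / commit-or-rollback on a transaction.
function transfer(db: sql.DB, from: string, to: string, amount: number): void {
    const tx = db.begin();
    try {
        tx.exec("UPDATE accounts SET balance = balance - ? WHERE id = ?", amount, from);
        tx.exec("UPDATE accounts SET balance = balance + ? WHERE id = ?", amount, to);
        tx.commit();
    } catch (err) {
        tx.rollback(); // a failed statement leaves the transaction unusable
        throw err;
    }
}
```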
+ */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Tx { + /** + * QueryRow executes a query that is expected to return at most one row. + * QueryRow always returns a non-nil value. Errors are deferred until + * [Row]'s Scan method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + * + * QueryRow uses [context.Background] internally; to specify the context, use + * [Tx.QueryRowContext]. + */ + queryRow(query: string, ...args: any[]): (Row) + } + /** + * Stmt is a prepared statement. + * A Stmt is safe for concurrent use by multiple goroutines. + * + * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single + * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will + * become unusable and all operations will return an error. + * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the + * [DB]. When the Stmt needs to execute on a new underlying connection, it will + * prepare itself on the new connection automatically. + */ + interface Stmt { + } + interface Stmt { + /** + * ExecContext executes a prepared statement with the given arguments and + * returns a [Result] summarizing the effect of the statement. + */ + execContext(ctx: context.Context, ...args: any[]): Result + } + interface Stmt { + /** + * Exec executes a prepared statement with the given arguments and + * returns a [Result] summarizing the effect of the statement. + * + * Exec uses [context.Background] internally; to specify the context, use + * [Stmt.ExecContext]. + */ + exec(...args: any[]): Result + } + interface Stmt { + /** + * QueryContext executes a prepared query statement with the given arguments + * and returns the query results as a [*Rows]. + */ + queryContext(ctx: context.Context, ...args: any[]): (Rows) + } + interface Stmt { + /** + * Query executes a prepared query statement with the given arguments + * and returns the query results as a *Rows. + * + * Query uses [context.Background] internally; to specify the context, use + * [Stmt.QueryContext]. + */ + query(...args: any[]): (Rows) + } + interface Stmt { + /** + * QueryRowContext executes a prepared query statement with the given arguments. + * If an error occurs during the execution of the statement, that error will + * be returned by a call to Scan on the returned [*Row], which is always non-nil. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, ...args: any[]): (Row) + } + interface Stmt { + /** + * QueryRow executes a prepared query statement with the given arguments. + * If an error occurs during the execution of the statement, that error will + * be returned by a call to Scan on the returned [*Row], which is always non-nil. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + * + * Example usage: + * + * ``` + * var name string + * err := nameByUseridStmt.QueryRow(id).Scan(&name) + * ``` + * + * QueryRow uses [context.Background] internally; to specify the context, use + * [Stmt.QueryRowContext]. + */ + queryRow(...args: any[]): (Row) + } + interface Stmt { + /** + * Close closes the statement. + */ + close(): void + } + /** + * Rows is the result of a query. 
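A sketch of reusing a single prepared statement, following the Stmt docs above; the db handle, the jobs table and the placeholder style are assumptions.

```ts
// Sketch: prepare once, execute many times, always close the statement.
function markDone(db: sql.DB, ids: Array<number>): number {
    const stmt = db.prepare("UPDATE jobs SET done = 1 WHERE id = ?");
    let affected = 0;
    try {
        for (let i = 0; i < ids.length; i++) {
            affected += stmt.exec(ids[i]).rowsAffected();
        }
    } finally {
        stmt.close(); // release the prepared statement
    }
    return affected;
}
```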
Its cursor starts before the first row + * of the result set. Use [Rows.Next] to advance from row to row. + */ + interface Rows { + } + interface Rows { + /** + * Next prepares the next result row for reading with the [Rows.Scan] method. It + * returns true on success, or false if there is no next result row or an error + * happened while preparing it. [Rows.Err] should be consulted to distinguish between + * the two cases. + * + * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next]. + */ + next(): boolean + } + interface Rows { + /** + * NextResultSet prepares the next result set for reading. It reports whether + * there is further result sets, or false if there is no further result set + * or if there is an error advancing to it. The [Rows.Err] method should be consulted + * to distinguish between the two cases. + * + * After calling NextResultSet, the [Rows.Next] method should always be called before + * scanning. If there are further result sets they may not have rows in the result + * set. + */ + nextResultSet(): boolean + } + interface Rows { + /** + * Err returns the error, if any, that was encountered during iteration. + * Err may be called after an explicit or implicit [Rows.Close]. + */ + err(): void + } + interface Rows { + /** + * Columns returns the column names. + * Columns returns an error if the rows are closed. + */ + columns(): Array + } + interface Rows { + /** + * ColumnTypes returns column information such as column type, length, + * and nullable. Some information may not be available from some drivers. + */ + columnTypes(): Array<(ColumnType | undefined)> + } + interface Rows { + /** + * Scan copies the columns in the current row into the values pointed + * at by dest. The number of values in dest must be the same as the + * number of columns in [Rows]. + * + * Scan converts columns read from the database into the following + * common Go types and special types provided by the sql package: + * + * ``` + * *string + * *[]byte + * *int, *int8, *int16, *int32, *int64 + * *uint, *uint8, *uint16, *uint32, *uint64 + * *bool + * *float32, *float64 + * *interface{} + * *RawBytes + * *Rows (cursor value) + * any type implementing Scanner (see Scanner docs) + * ``` + * + * In the most simple case, if the type of the value from the source + * column is an integer, bool or string type T and dest is of type *T, + * Scan simply assigns the value through the pointer. + * + * Scan also converts between string and numeric types, as long as no + * information would be lost. While Scan stringifies all numbers + * scanned from numeric database columns into *string, scans into + * numeric types are checked for overflow. For example, a float64 with + * value 300 or a string with value "300" can scan into a uint16, but + * not into a uint8, though float64(255) or "255" can scan into a + * uint8. One exception is that scans of some float64 numbers to + * strings may lose information when stringifying. In general, scan + * floating point columns into *float64. + * + * If a dest argument has type *[]byte, Scan saves in that argument a + * copy of the corresponding data. The copy is owned by the caller and + * can be modified and held indefinitely. The copy can be avoided by + * using an argument of type [*RawBytes] instead; see the documentation + * for [RawBytes] for restrictions on its use. + * + * If an argument has type *interface{}, Scan copies the value + * provided by the underlying driver without conversion. 
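A sketch of the Rows iteration protocol described above (Next before Scan, Err after the loop, Close at the end); scanning destinations are host-specific, so this sketch only counts rows.

```ts
// Sketch: Next before every Scan, Err after the loop, Close at the end.
function countRows(rows: sql.Rows): number {
    let n = 0;
    try {
        while (rows.next()) {
            n++; // per-row scanning would happen here
        }
        const err = rows.err() as any; // distinguishes "done" from "failed"
        if (err) {
            throw err;
        }
    } finally {
        rows.close();
    }
    return n;
}
```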
When scanning + * from a source value of type []byte to *interface{}, a copy of the + * slice is made and the caller owns the result. + * + * Source values of type [time.Time] may be scanned into values of type + * *time.Time, *interface{}, *string, or *[]byte. When converting to + * the latter two, [time.RFC3339Nano] is used. + * + * Source values of type bool may be scanned into types *bool, + * *interface{}, *string, *[]byte, or [*RawBytes]. + * + * For scanning into *bool, the source may be true, false, 1, 0, or + * string inputs parseable by [strconv.ParseBool]. + * + * Scan can also convert a cursor returned from a query, such as + * "select cursor(select * from my_table) from dual", into a + * [*Rows] value that can itself be scanned from. The parent + * select query will close any cursor [*Rows] if the parent [*Rows] is closed. + * + * If any of the first arguments implementing [Scanner] returns an error, + * that error will be wrapped in the returned error. + */ + scan(...dest: any[]): void + } + interface Rows { + /** + * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called + * and returns false and there are no further result sets, + * the [Rows] are closed automatically and it will suffice to check the + * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err]. + */ + close(): void + } + /** + * A Result summarizes an executed SQL command. + */ + interface Result { + [key:string]: any; + /** + * LastInsertId returns the integer generated by the database + * in response to a command. Typically this will be from an + * "auto increment" column when inserting a new row. Not all + * databases support this feature, and the syntax of such + * statements varies. + */ + lastInsertId(): number + /** + * RowsAffected returns the number of rows affected by an + * update, insert, or delete. Not every database or database + * driver may support this. + */ + rowsAffected(): number } } @@ -15635,198 +16310,129 @@ namespace syntax { interface Flags extends Number{} } -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { +namespace store { /** - * DateTime represents a [time.Time] instance in UTC that is wrapped - * and serialized using the app default date layout. + * Store defines a concurrent safe in memory key-value data store. */ - interface DateTime { + interface Store { } - interface DateTime { + interface Store { /** - * Time returns the internal [time.Time] instance. + * Reset clears the store and replaces the store data with a + * shallow copy of the provided newData. */ - time(): time.Time + reset(newData: _TygojaDict): void } - interface DateTime { + interface Store { /** - * Add returns a new DateTime based on the current DateTime + the specified duration. + * Length returns the current number of elements in the store. */ - add(duration: time.Duration): DateTime + length(): number } - interface DateTime { + interface Store { /** - * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. + * RemoveAll removes all the existing store entries. + */ + removeAll(): void + } + interface Store { + /** + * Remove removes a single entry from the store. * - * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], - * the maximum (or minimum) duration will be returned. + * Remove does nothing if key doesn't exist in the store. 
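A small sketch of the two Result accessors declared above; the subscribers table and placeholder style are assumptions, and lastInsertId support depends on the database.

```ts
// Sketch: reading both Result accessors after an INSERT.
function insertAndReport(db: sql.DB, email: string): void {
    const res = db.exec("INSERT INTO subscribers (email) VALUES (?)", email);
    console.log("new row id:", res.lastInsertId());    // driver/database dependent
    console.log("rows affected:", res.rowsAffected());
}
```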
*/ - sub(u: DateTime): time.Duration + remove(key: K): void } - interface DateTime { + interface Store { /** - * AddDate returns a new DateTime based on the current one + duration. + * Has checks if element with the specified key exist or not. + */ + has(key: K): boolean + } + interface Store { + /** + * Get returns a single element value from the store. * - * It follows the same rules as [time.AddDate]. + * If key is not set, the zero T value is returned. */ - addDate(years: number, months: number, days: number): DateTime + get(key: K): T } - interface DateTime { + interface Store { /** - * After reports whether the current DateTime instance is after u. + * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not. */ - after(u: DateTime): boolean + getOk(key: K): [T, boolean] } - interface DateTime { + interface Store { /** - * Before reports whether the current DateTime instance is before u. + * GetAll returns a shallow copy of the current store data. */ - before(u: DateTime): boolean + getAll(): _TygojaDict } - interface DateTime { + interface Store { /** - * Compare compares the current DateTime instance with u. - * If the current instance is before u, it returns -1. - * If the current instance is after u, it returns +1. - * If they're the same, it returns 0. + * Values returns a slice with all of the current store values. */ - compare(u: DateTime): number + values(): Array } - interface DateTime { + interface Store { /** - * Equal reports whether the current DateTime and u represent the same time instant. - * Two DateTime can be equal even if they are in different locations. - * For example, 6:00 +0200 and 4:00 UTC are Equal. + * Set sets (or overwrite if already exists) a new value for key. */ - equal(u: DateTime): boolean + set(key: K, value: T): void } - interface DateTime { + interface Store { /** - * Unix returns the current DateTime as a Unix time, aka. - * the number of seconds elapsed since January 1, 1970 UTC. - */ - unix(): number - } - interface DateTime { - /** - * IsZero checks whether the current DateTime instance has zero time value. - */ - isZero(): boolean - } - interface DateTime { - /** - * String serializes the current DateTime instance into a formatted - * UTC date string. + * SetFunc sets (or overwrite if already exists) a new value resolved + * from the function callback for the provided key. * - * The zero value is serialized to an empty string. + * The function callback receives as argument the old store element value (if exists). + * If there is no old store element, the argument will be the T zero value. + * + * Example: + * + * ``` + * s := store.New[string, int](nil) + * s.SetFunc("count", func(old int) int { + * return old + 1 + * }) + * ``` */ - string(): string + setFunc(key: K, fn: (old: T) => T): void } - interface DateTime { + interface Store { /** - * MarshalJSON implements the [json.Marshaler] interface. + * GetOrSet retrieves a single existing value for the provided key + * or stores a new one if it doesn't exist. + */ + getOrSet(key: K, setFunc: () => T): T + } + interface Store { + /** + * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. + * + * This method is similar to Set() but **it will skip adding new elements** + * to the store if the store length has reached the specified limit. + * false is returned if maxAllowedElements limit is reached. 
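A sketch of basic Store usage per the declarations above; the cache handle comes from the host, and the generic parameters mirror the Go definition (drop them if the generated typings are non-generic).

```ts
// Sketch: a few of the Store helpers declared above.
function bumpVisits(cache: store.Store<string, number>): number {
    // setFunc receives the previous value, or the zero value for new keys.
    cache.setFunc("visits", (old) => old + 1);

    // getOrSet computes and stores the default only when the key is missing.
    cache.getOrSet("startedAt", () => Date.now());

    // Skip the write entirely once the store holds 1000 entries.
    cache.setIfLessThanLimit("lastSeen", Date.now(), 1000);

    const [visits, ok] = cache.getOk("visits");
    return ok ? visits : 0;
}
```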
+ */ + setIfLessThanLimit(key: K, value: T, maxAllowedElements: number): boolean + } + interface Store { + /** + * UnmarshalJSON implements [json.Unmarshaler] and imports the + * provided JSON data into the store. + * + * The store entries that match with the ones from the data will be overwritten with the new value. + */ + unmarshalJSON(data: string|Array): void + } + interface Store { + /** + * MarshalJSON implements [json.Marshaler] and export the current + * store data into valid JSON. */ marshalJSON(): string|Array } - interface DateTime { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface DateTime { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface DateTime { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current DateTime instance. - */ - scan(value: any): void - } - /** - * GeoPoint defines a struct for storing geo coordinates as serialized json object - * (e.g. {lon:0,lat:0}). - * - * Note: using object notation and not a plain array to avoid the confusion - * as there doesn't seem to be a fixed standard for the coordinates order. - */ - interface GeoPoint { - lon: number - lat: number - } - interface GeoPoint { - /** - * String returns the string representation of the current GeoPoint instance. - */ - string(): string - } - interface GeoPoint { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface GeoPoint { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current GeoPoint instance. - * - * The value argument could be nil (no-op), another GeoPoint instance, - * map or serialized json object with lat-lon props. - */ - scan(value: any): void - } - /** - * JSONArray defines a slice that is safe for json and db read/write. - */ - interface JSONArray extends Array{} - /** - * JSONMap defines a map that is safe for json and db read/write. - */ - interface JSONMap extends _TygojaDict{} - /** - * JSONRaw defines a json value type that is safe for db read/write. - */ - interface JSONRaw extends Array{} - interface JSONRaw { - /** - * String returns the current JSONRaw instance as a json encoded string. - */ - string(): string - } - interface JSONRaw { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface JSONRaw { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface JSONRaw { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface JSONRaw { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current JSONRaw instance. - */ - scan(value: any): void - } } /** @@ -16045,270 +16651,247 @@ namespace jwt { } } -namespace store { +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { /** - * Store defines a concurrent safe in memory key-value data store. + * DateTime represents a [time.Time] instance in UTC that is wrapped + * and serialized using the app default date layout. */ - interface Store { + interface DateTime { } - interface Store { + interface DateTime { /** - * Reset clears the store and replaces the store data with a - * shallow copy of the provided newData. + * Time returns the internal [time.Time] instance. 
*/ - reset(newData: _TygojaDict): void + time(): time.Time } - interface Store { + interface DateTime { /** - * Length returns the current number of elements in the store. + * Add returns a new DateTime based on the current DateTime + the specified duration. */ - length(): number + add(duration: time.Duration): DateTime } - interface Store { + interface DateTime { /** - * RemoveAll removes all the existing store entries. - */ - removeAll(): void - } - interface Store { - /** - * Remove removes a single entry from the store. + * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. * - * Remove does nothing if key doesn't exist in the store. + * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], + * the maximum (or minimum) duration will be returned. */ - remove(key: K): void + sub(u: DateTime): time.Duration } - interface Store { + interface DateTime { /** - * Has checks if element with the specified key exist or not. - */ - has(key: K): boolean - } - interface Store { - /** - * Get returns a single element value from the store. + * AddDate returns a new DateTime based on the current one + duration. * - * If key is not set, the zero T value is returned. + * It follows the same rules as [time.AddDate]. */ - get(key: K): T + addDate(years: number, months: number, days: number): DateTime } - interface Store { + interface DateTime { /** - * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not. + * After reports whether the current DateTime instance is after u. */ - getOk(key: K): [T, boolean] + after(u: DateTime): boolean } - interface Store { + interface DateTime { /** - * GetAll returns a shallow copy of the current store data. + * Before reports whether the current DateTime instance is before u. */ - getAll(): _TygojaDict + before(u: DateTime): boolean } - interface Store { + interface DateTime { /** - * Values returns a slice with all of the current store values. + * Compare compares the current DateTime instance with u. + * If the current instance is before u, it returns -1. + * If the current instance is after u, it returns +1. + * If they're the same, it returns 0. */ - values(): Array + compare(u: DateTime): number } - interface Store { + interface DateTime { /** - * Set sets (or overwrite if already exists) a new value for key. + * Equal reports whether the current DateTime and u represent the same time instant. + * Two DateTime can be equal even if they are in different locations. + * For example, 6:00 +0200 and 4:00 UTC are Equal. */ - set(key: K, value: T): void + equal(u: DateTime): boolean } - interface Store { + interface DateTime { /** - * SetFunc sets (or overwrite if already exists) a new value resolved - * from the function callback for the provided key. + * Unix returns the current DateTime as a Unix time, aka. + * the number of seconds elapsed since January 1, 1970 UTC. + */ + unix(): number + } + interface DateTime { + /** + * IsZero checks whether the current DateTime instance has zero time value. + */ + isZero(): boolean + } + interface DateTime { + /** + * String serializes the current DateTime instance into a formatted + * UTC date string. * - * The function callback receives as argument the old store element value (if exists). - * If there is no old store element, the argument will be the T zero value. 
- * - * Example: - * - * ``` - * s := store.New[string, int](nil) - * s.SetFunc("count", func(old int) int { - * return old + 1 - * }) - * ``` + * The zero value is serialized to an empty string. */ - setFunc(key: K, fn: (old: T) => T): void + string(): string } - interface Store { + interface DateTime { /** - * GetOrSet retrieves a single existing value for the provided key - * or stores a new one if it doesn't exist. - */ - getOrSet(key: K, setFunc: () => T): T - } - interface Store { - /** - * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. - * - * This method is similar to Set() but **it will skip adding new elements** - * to the store if the store length has reached the specified limit. - * false is returned if maxAllowedElements limit is reached. - */ - setIfLessThanLimit(key: K, value: T, maxAllowedElements: number): boolean - } - interface Store { - /** - * UnmarshalJSON implements [json.Unmarshaler] and imports the - * provided JSON data into the store. - * - * The store entries that match with the ones from the data will be overwritten with the new value. - */ - unmarshalJSON(data: string|Array): void - } - interface Store { - /** - * MarshalJSON implements [json.Marshaler] and export the current - * store data into valid JSON. + * MarshalJSON implements the [json.Marshaler] interface. */ marshalJSON(): string|Array } + interface DateTime { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface DateTime { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface DateTime { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current DateTime instance. + */ + scan(value: any): void + } + /** + * GeoPoint defines a struct for storing geo coordinates as serialized json object + * (e.g. {lon:0,lat:0}). + * + * Note: using object notation and not a plain array to avoid the confusion + * as there doesn't seem to be a fixed standard for the coordinates order. + */ + interface GeoPoint { + lon: number + lat: number + } + interface GeoPoint { + /** + * String returns the string representation of the current GeoPoint instance. + */ + string(): string + } + interface GeoPoint { + /** + * AsMap implements [core.mapExtractor] and returns a value suitable + * to be used in an API rule expression. + */ + asMap(): _TygojaDict + } + interface GeoPoint { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface GeoPoint { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current GeoPoint instance. + * + * The value argument could be nil (no-op), another GeoPoint instance, + * map or serialized json object with lat-lon props. + */ + scan(value: any): void + } + /** + * JSONArray defines a slice that is safe for json and db read/write. + */ + interface JSONArray extends Array{} + /** + * JSONMap defines a map that is safe for json and db read/write. + */ + interface JSONMap extends _TygojaDict{} + /** + * JSONRaw defines a json value type that is safe for db read/write. + */ + interface JSONRaw extends Array{} + interface JSONRaw { + /** + * String returns the current JSONRaw instance as a json encoded string. + */ + string(): string + } + interface JSONRaw { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface JSONRaw { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. 
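A sketch of formatting DateTime and GeoPoint values received from the host (e.g. record fields); nothing here constructs new instances.

```ts
// Sketch: formatting values received from the host (e.g. record fields).
function describe(dt: types.DateTime, p: types.GeoPoint): string {
    const when = dt.isZero()
        ? "(not set)"
        : dt.string() + " (unix " + dt.unix() + ")";
    // GeoPoint serializes as {lon, lat} to avoid coordinate-order confusion.
    return when + " @ lon=" + p.lon + ", lat=" + p.lat;
}
```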
+ */ + unmarshalJSON(b: string|Array): void + } + interface JSONRaw { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface JSONRaw { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current JSONRaw instance. + */ + scan(value: any): void + } } -namespace subscriptions { +namespace search { /** - * Broker defines a struct for managing subscriptions clients. + * Result defines the returned search result structure. */ - interface Broker { - } - interface Broker { - /** - * Clients returns a shallow copy of all registered clients indexed - * with their connection id. - */ - clients(): _TygojaDict - } - interface Broker { - /** - * ChunkedClients splits the current clients into a chunked slice. - */ - chunkedClients(chunkSize: number): Array> - } - interface Broker { - /** - * TotalClients returns the total number of registered clients. - */ - totalClients(): number - } - interface Broker { - /** - * ClientById finds a registered client by its id. - * - * Returns non-nil error when client with clientId is not registered. - */ - clientById(clientId: string): Client - } - interface Broker { - /** - * Register adds a new client to the broker instance. - */ - register(client: Client): void - } - interface Broker { - /** - * Unregister removes a single client by its id and marks it as discarded. - * - * If client with clientId doesn't exist, this method does nothing. - */ - unregister(clientId: string): void + interface Result { + items: any + page: number + perPage: number + totalItems: number + totalPages: number } /** - * Client is an interface for a generic subscription client. + * ResolverResult defines a single FieldResolver.Resolve() successfully parsed result. */ - interface Client { - [key:string]: any; + interface ResolverResult { /** - * Id Returns the unique id of the client. + * Identifier is the plain SQL identifier/column that will be used + * in the final db expression as left or right operand. */ - id(): string + identifier: string /** - * Channel returns the client's communication channel. - * - * NB! The channel shouldn't be used after calling Discard(). + * NoCoalesce instructs to not use COALESCE or NULL fallbacks + * when building the identifier expression. */ - channel(): undefined + noCoalesce: boolean /** - * Subscriptions returns a shallow copy of the client subscriptions matching the prefixes. - * If no prefix is specified, returns all subscriptions. + * Params is a map with db placeholder->value pairs that will be added + * to the query when building both resolved operands/sides in a single expression. */ - subscriptions(...prefixes: string[]): _TygojaDict + params: dbx.Params /** - * Subscribe subscribes the client to the provided subscriptions list. - * - * Each subscription can also have "options" (json serialized SubscriptionOptions) as query parameter. - * - * Example: - * - * ``` - * Subscribe( - * "subscriptionA", - * `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`, - * ) - * ``` + * MultiMatchSubQuery is an optional sub query expression that will be added + * in addition to the combined ResolverResult expression during build. */ - subscribe(...subs: string[]): void + multiMatchSubQuery: dbx.Expression /** - * Unsubscribe unsubscribes the client from the provided subscriptions list. + * AfterBuild is an optional function that will be called after building + * and combining the result of both resolved operands/sides in a single expression. 
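A sketch that assembles the paginated Result shape declared above; the item type and the page math are illustrative.

```ts
// Sketch: filling the paginated Result shape declared above.
function paginate(items: Array<any>, page: number, perPage: number, totalItems: number): search.Result {
    return {
        items: items,
        page: page,
        perPage: perPage,
        totalItems: totalItems,
        totalPages: Math.ceil(totalItems / perPage),
    };
}
```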
*/ - unsubscribe(...subs: string[]): void - /** - * HasSubscription checks if the client is subscribed to `sub`. - */ - hasSubscription(sub: string): boolean - /** - * Set stores any value to the client's context. - */ - set(key: string, value: any): void - /** - * Unset removes a single value from the client's context. - */ - unset(key: string): void - /** - * Get retrieves the key value from the client's context. - */ - get(key: string): any - /** - * Discard marks the client as "discarded" (and closes its channel), - * meaning that it shouldn't be used anymore for sending new messages. - * - * It is safe to call Discard() multiple times. - */ - discard(): void - /** - * IsDiscarded indicates whether the client has been "discarded" - * and should no longer be used. - */ - isDiscarded(): boolean - /** - * Send sends the specified message to the client's channel (if not discarded). - */ - send(m: Message): void - } - /** - * Message defines a client's channel data. - */ - interface Message { - name: string - data: string|Array - } - interface Message { - /** - * WriteSSE writes the current message in a SSE format into the provided writer. - * - * For example, writing to a router.Event: - * - * ``` - * m := Message{Name: "users/create", Data: []byte{...}} - * m.Write(e.Response, "yourEventId") - * e.Flush() - * ``` - */ - writeSSE(w: io.Writer, eventId: string): void + afterBuild: (expr: dbx.Expression) => dbx.Expression } } @@ -16322,8 +16905,8 @@ namespace bufio { * ReadWriter stores pointers to a [Reader] and a [Writer]. * It implements [io.ReadWriter]. */ - type _srCeaYl = Reader&Writer - interface ReadWriter extends _srCeaYl { + type _sbQyxHc = Reader&Writer + interface ReadWriter extends _sbQyxHc { } } @@ -17354,6 +17937,1226 @@ namespace http { } } +namespace auth { + /** + * Provider defines a common interface for an OAuth2 client. + */ + interface Provider { + [key:string]: any; + /** + * Context returns the context associated with the provider (if any). + */ + context(): context.Context + /** + * SetContext assigns the specified context to the current provider. + */ + setContext(ctx: context.Context): void + /** + * PKCE indicates whether the provider can use the PKCE flow. + */ + pkce(): boolean + /** + * SetPKCE toggles the state whether the provider can use the PKCE flow or not. + */ + setPKCE(enable: boolean): void + /** + * DisplayName usually returns provider name as it is officially written + * and it could be used directly in the UI. + */ + displayName(): string + /** + * SetDisplayName sets the provider's display name. + */ + setDisplayName(displayName: string): void + /** + * Scopes returns the provider access permissions that will be requested. + */ + scopes(): Array + /** + * SetScopes sets the provider access permissions that will be requested later. + */ + setScopes(scopes: Array): void + /** + * ClientId returns the provider client's app ID. + */ + clientId(): string + /** + * SetClientId sets the provider client's ID. + */ + setClientId(clientId: string): void + /** + * ClientSecret returns the provider client's app secret. + */ + clientSecret(): string + /** + * SetClientSecret sets the provider client's app secret. + */ + setClientSecret(secret: string): void + /** + * RedirectURL returns the end address to redirect the user + * going through the OAuth flow. + */ + redirectURL(): string + /** + * SetRedirectURL sets the provider's RedirectURL. + */ + setRedirectURL(url: string): void + /** + * AuthURL returns the provider's authorization service url. 
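A sketch of configuring an OAuth2 provider through the setters declared above; the provider instance, scopes and redirect URL are placeholder assumptions.

```ts
// Sketch: wiring up an OAuth2 provider via the setters declared above.
function configureProvider(p: auth.Provider, clientId: string, clientSecret: string): void {
    p.setDisplayName("Example OAuth2");
    p.setScopes(["openid", "email", "profile"]);
    p.setClientId(clientId);
    p.setClientSecret(clientSecret);
    p.setRedirectURL("https://example.com/api/oauth2-redirect");
}
```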
+ */ + authURL(): string + /** + * SetAuthURL sets the provider's AuthURL. + */ + setAuthURL(url: string): void + /** + * TokenURL returns the provider's token exchange service url. + */ + tokenURL(): string + /** + * SetTokenURL sets the provider's TokenURL. + */ + setTokenURL(url: string): void + /** + * UserInfoURL returns the provider's user info api url. + */ + userInfoURL(): string + /** + * SetUserInfoURL sets the provider's UserInfoURL. + */ + setUserInfoURL(url: string): void + /** + * Extra returns a shallow copy of any custom config data + * that the provider may be need. + */ + extra(): _TygojaDict + /** + * SetExtra updates the provider's custom config data. + */ + setExtra(data: _TygojaDict): void + /** + * Client returns an http client using the provided token. + */ + client(token: oauth2.Token): (any) + /** + * BuildAuthURL returns a URL to the provider's consent page + * that asks for permissions for the required scopes explicitly. + */ + buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string + /** + * FetchToken converts an authorization code to token. + */ + fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token) + /** + * FetchRawUserInfo requests and marshalizes into `result` the + * the OAuth user api response. + */ + fetchRawUserInfo(token: oauth2.Token): string|Array + /** + * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and + * marshalizes the user api response into a standardized AuthUser struct. + */ + fetchAuthUser(token: oauth2.Token): (AuthUser) + } + /** + * AuthUser defines a standardized OAuth2 user data structure. + */ + interface AuthUser { + expiry: types.DateTime + rawUser: _TygojaDict + id: string + name: string + username: string + email: string + avatarURL: string + accessToken: string + refreshToken: string + /** + * @todo + * deprecated: use AvatarURL instead + * AvatarUrl will be removed after dropping v0.22 support + */ + avatarUrl: string + } + interface AuthUser { + /** + * MarshalJSON implements the [json.Marshaler] interface. + * + * @todo remove after dropping v0.22 support + */ + marshalJSON(): string|Array + } +} + +namespace subscriptions { + /** + * Broker defines a struct for managing subscriptions clients. + */ + interface Broker { + } + interface Broker { + /** + * Clients returns a shallow copy of all registered clients indexed + * with their connection id. + */ + clients(): _TygojaDict + } + interface Broker { + /** + * ChunkedClients splits the current clients into a chunked slice. + */ + chunkedClients(chunkSize: number): Array> + } + interface Broker { + /** + * TotalClients returns the total number of registered clients. + */ + totalClients(): number + } + interface Broker { + /** + * ClientById finds a registered client by its id. + * + * Returns non-nil error when client with clientId is not registered. + */ + clientById(clientId: string): Client + } + interface Broker { + /** + * Register adds a new client to the broker instance. + */ + register(client: Client): void + } + interface Broker { + /** + * Unregister removes a single client by its id and marks it as discarded. + * + * If client with clientId doesn't exist, this method does nothing. + */ + unregister(clientId: string): void + } + /** + * Client is an interface for a generic subscription client. + */ + interface Client { + [key:string]: any; + /** + * Id Returns the unique id of the client. + */ + id(): string + /** + * Channel returns the client's communication channel. + * + * NB! 
The channel shouldn't be used after calling Discard(). + */ + channel(): undefined + /** + * Subscriptions returns a shallow copy of the client subscriptions matching the prefixes. + * If no prefix is specified, returns all subscriptions. + */ + subscriptions(...prefixes: string[]): _TygojaDict + /** + * Subscribe subscribes the client to the provided subscriptions list. + * + * Each subscription can also have "options" (json serialized SubscriptionOptions) as query parameter. + * + * Example: + * + * ``` + * Subscribe( + * "subscriptionA", + * `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`, + * ) + * ``` + */ + subscribe(...subs: string[]): void + /** + * Unsubscribe unsubscribes the client from the provided subscriptions list. + */ + unsubscribe(...subs: string[]): void + /** + * HasSubscription checks if the client is subscribed to `sub`. + */ + hasSubscription(sub: string): boolean + /** + * Set stores any value to the client's context. + */ + set(key: string, value: any): void + /** + * Unset removes a single value from the client's context. + */ + unset(key: string): void + /** + * Get retrieves the key value from the client's context. + */ + get(key: string): any + /** + * Discard marks the client as "discarded" (and closes its channel), + * meaning that it shouldn't be used anymore for sending new messages. + * + * It is safe to call Discard() multiple times. + */ + discard(): void + /** + * IsDiscarded indicates whether the client has been "discarded" + * and should no longer be used. + */ + isDiscarded(): boolean + /** + * Send sends the specified message to the client's channel (if not discarded). + */ + send(m: Message): void + } + /** + * Message defines a client's channel data. + */ + interface Message { + name: string + data: string|Array + } + interface Message { + /** + * WriteSSE writes the current message in a SSE format into the provided writer. + * + * For example, writing to a router.Event: + * + * ``` + * m := Message{Name: "users/create", Data: []byte{...}} + * m.Write(e.Response, "yourEventId") + * e.Flush() + * ``` + */ + writeSSE(w: io.Writer, eventId: string): void + } +} + +/** + * Package cron implements a crontab-like service to execute and schedule + * repeative tasks/jobs. + * + * Example: + * + * ``` + * c := cron.New() + * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) + * c.Start() + * ``` + */ +namespace cron { + /** + * Cron is a crontab-like struct for tasks/jobs scheduling. + */ + interface Cron { + } + interface Cron { + /** + * SetInterval changes the current cron tick interval + * (it usually should be >= 1 minute). + */ + setInterval(d: time.Duration): void + } + interface Cron { + /** + * SetTimezone changes the current cron tick timezone. + */ + setTimezone(l: time.Location): void + } + interface Cron { + /** + * MustAdd is similar to Add() but panic on failure. + */ + mustAdd(jobId: string, cronExpr: string, run: () => void): void + } + interface Cron { + /** + * Add registers a single cron job. + * + * If there is already a job with the provided id, then the old job + * will be replaced with the new one. + * + * cronExpr is a regular cron expression, eg. "0 *\/3 * * *" (aka. at minute 0 past every 3rd hour). + * Check cron.NewSchedule() for the supported tokens. + */ + add(jobId: string, cronExpr: string, fn: () => void): void + } + interface Cron { + /** + * Remove removes a single cron job by its id. + */ + remove(jobId: string): void + } + interface Cron { + /** + * RemoveAll removes all registered cron jobs. 
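A minimal broadcasting sketch built on the subscriptions declarations above; the topic name and payload are illustrative and the Broker instance is assumed to come from the application:

```
function notifySubscribers(broker: subscriptions.Broker, topic: string, payload: string): void {
    const clients = broker.clients() // shallow copy indexed by connection id

    for (const id in clients) {
        const client = clients[id]

        if (!client.isDiscarded() && client.hasSubscription(topic)) {
            client.send({ name: topic, data: payload })
        }
    }
}
```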
+ */ + removeAll(): void + } + interface Cron { + /** + * Total returns the current total number of registered cron jobs. + */ + total(): number + } + interface Cron { + /** + * Jobs returns a shallow copy of the currently registered cron jobs. + */ + jobs(): Array<(Job | undefined)> + } + interface Cron { + /** + * Stop stops the current cron ticker (if not already). + * + * You can resume the ticker by calling Start(). + */ + stop(): void + } + interface Cron { + /** + * Start starts the cron ticker. + * + * Calling Start() on already started cron will restart the ticker. + */ + start(): void + } + interface Cron { + /** + * HasStarted checks whether the current Cron ticker has been started. + */ + hasStarted(): boolean + } +} + +namespace exec { + /** + * Cmd represents an external command being prepared or run. + * + * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] + * methods. + */ + interface Cmd { + /** + * Path is the path of the command to run. + * + * This is the only field that must be set to a non-zero + * value. If Path is relative, it is evaluated relative + * to Dir. + */ + path: string + /** + * Args holds command line arguments, including the command as Args[0]. + * If the Args field is empty or nil, Run uses {Path}. + * + * In typical use, both Path and Args are set by calling Command. + */ + args: Array + /** + * Env specifies the environment of the process. + * Each entry is of the form "key=value". + * If Env is nil, the new process uses the current process's + * environment. + * If Env contains duplicate environment keys, only the last + * value in the slice for each duplicate key is used. + * As a special case on Windows, SYSTEMROOT is always added if + * missing and not explicitly set to the empty string. + */ + env: Array + /** + * Dir specifies the working directory of the command. + * If Dir is the empty string, Run runs the command in the + * calling process's current directory. + */ + dir: string + /** + * Stdin specifies the process's standard input. + * + * If Stdin is nil, the process reads from the null device (os.DevNull). + * + * If Stdin is an *os.File, the process's standard input is connected + * directly to that file. + * + * Otherwise, during the execution of the command a separate + * goroutine reads from Stdin and delivers that data to the command + * over a pipe. In this case, Wait does not complete until the goroutine + * stops copying, either because it has reached the end of Stdin + * (EOF or a read error), or because writing to the pipe returned an error, + * or because a nonzero WaitDelay was set and expired. + */ + stdin: io.Reader + /** + * Stdout and Stderr specify the process's standard output and error. + * + * If either is nil, Run connects the corresponding file descriptor + * to the null device (os.DevNull). + * + * If either is an *os.File, the corresponding output from the process + * is connected directly to that file. + * + * Otherwise, during the execution of the command a separate goroutine + * reads from the process over a pipe and delivers that data to the + * corresponding Writer. In this case, Wait does not complete until the + * goroutine reaches EOF or encounters an error or a nonzero WaitDelay + * expires. + * + * If Stdout and Stderr are the same writer, and have a type that can + * be compared with ==, at most one goroutine at a time will call Write. 
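A small scheduling sketch against the Cron declarations above, assuming the Cron instance is supplied by the runtime; the job id and cron expression are placeholders:

```
function scheduleDailyReport(c: cron.Cron): void {
    // register (or replace) a job that runs every day at 00:00
    c.mustAdd("dailyReport", "0 0 * * *", () => {
        console.log("daily report triggered")
    })

    // the ticker must be running for registered jobs to fire
    if (!c.hasStarted()) {
        c.start()
    }
}
```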
+ */ + stdout: io.Writer + stderr: io.Writer + /** + * ExtraFiles specifies additional open files to be inherited by the + * new process. It does not include standard input, standard output, or + * standard error. If non-nil, entry i becomes file descriptor 3+i. + * + * ExtraFiles is not supported on Windows. + */ + extraFiles: Array<(os.File | undefined)> + /** + * SysProcAttr holds optional, operating system-specific attributes. + * Run passes it to os.StartProcess as the os.ProcAttr's Sys field. + */ + sysProcAttr?: syscall.SysProcAttr + /** + * Process is the underlying process, once started. + */ + process?: os.Process + /** + * ProcessState contains information about an exited process. + * If the process was started successfully, Wait or Run will + * populate its ProcessState when the command completes. + */ + processState?: os.ProcessState + err: Error // LookPath error, if any. + /** + * If Cancel is non-nil, the command must have been created with + * CommandContext and Cancel will be called when the command's + * Context is done. By default, CommandContext sets Cancel to + * call the Kill method on the command's Process. + * + * Typically a custom Cancel will send a signal to the command's + * Process, but it may instead take other actions to initiate cancellation, + * such as closing a stdin or stdout pipe or sending a shutdown request on a + * network socket. + * + * If the command exits with a success status after Cancel is + * called, and Cancel does not return an error equivalent to + * os.ErrProcessDone, then Wait and similar methods will return a non-nil + * error: either an error wrapping the one returned by Cancel, + * or the error from the Context. + * (If the command exits with a non-success status, or Cancel + * returns an error that wraps os.ErrProcessDone, Wait and similar methods + * continue to return the command's usual exit status.) + * + * If Cancel is set to nil, nothing will happen immediately when the command's + * Context is done, but a nonzero WaitDelay will still take effect. That may + * be useful, for example, to work around deadlocks in commands that do not + * support shutdown signals but are expected to always finish quickly. + * + * Cancel will not be called if Start returns a non-nil error. + */ + cancel: () => void + /** + * If WaitDelay is non-zero, it bounds the time spent waiting on two sources + * of unexpected delay in Wait: a child process that fails to exit after the + * associated Context is canceled, and a child process that exits but leaves + * its I/O pipes unclosed. + * + * The WaitDelay timer starts when either the associated Context is done or a + * call to Wait observes that the child process has exited, whichever occurs + * first. When the delay has elapsed, the command shuts down the child process + * and/or its I/O pipes. + * + * If the child process has failed to exit — perhaps because it ignored or + * failed to receive a shutdown signal from a Cancel function, or because no + * Cancel function was set — then it will be terminated using os.Process.Kill. + * + * Then, if the I/O pipes communicating with the child process are still open, + * those pipes are closed in order to unblock any goroutines currently blocked + * on Read or Write calls. + * + * If pipes are closed due to WaitDelay, no Cancel call has occurred, + * and the command has otherwise exited with a successful status, Wait and + * similar methods will return ErrWaitDelay instead of nil. 
+ * + * If WaitDelay is zero (the default), I/O pipes will be read until EOF, + * which might not occur until orphaned subprocesses of the command have + * also closed their descriptors for the pipes. + */ + waitDelay: time.Duration + } + interface Cmd { + /** + * String returns a human-readable description of c. + * It is intended only for debugging. + * In particular, it is not suitable for use as input to a shell. + * The output of String may vary across Go releases. + */ + string(): string + } + interface Cmd { + /** + * Run starts the specified command and waits for it to complete. + * + * The returned error is nil if the command runs, has no problems + * copying stdin, stdout, and stderr, and exits with a zero exit + * status. + * + * If the command starts but does not complete successfully, the error is of + * type [*ExitError]. Other error types may be returned for other situations. + * + * If the calling goroutine has locked the operating system thread + * with [runtime.LockOSThread] and modified any inheritable OS-level + * thread state (for example, Linux or Plan 9 name spaces), the new + * process will inherit the caller's thread state. + */ + run(): void + } + interface Cmd { + /** + * Start starts the specified command but does not wait for it to complete. + * + * If Start returns successfully, the c.Process field will be set. + * + * After a successful call to Start the [Cmd.Wait] method must be called in + * order to release associated system resources. + */ + start(): void + } + interface Cmd { + /** + * Wait waits for the command to exit and waits for any copying to + * stdin or copying from stdout or stderr to complete. + * + * The command must have been started by [Cmd.Start]. + * + * The returned error is nil if the command runs, has no problems + * copying stdin, stdout, and stderr, and exits with a zero exit + * status. + * + * If the command fails to run or doesn't complete successfully, the + * error is of type [*ExitError]. Other error types may be + * returned for I/O problems. + * + * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits + * for the respective I/O loop copying to or from the process to complete. + * + * Wait releases any resources associated with the [Cmd]. + */ + wait(): void + } + interface Cmd { + /** + * Output runs the command and returns its standard output. + * Any returned error will usually be of type [*ExitError]. + * If c.Stderr was nil, Output populates [ExitError.Stderr]. + */ + output(): string|Array + } + interface Cmd { + /** + * CombinedOutput runs the command and returns its combined standard + * output and standard error. + */ + combinedOutput(): string|Array + } + interface Cmd { + /** + * StdinPipe returns a pipe that will be connected to the command's + * standard input when the command starts. + * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. + * A caller need only call Close to force the pipe to close sooner. + * For example, if the command being run will not exit until standard input + * is closed, the caller must close the pipe. + */ + stdinPipe(): io.WriteCloser + } + interface Cmd { + /** + * StdoutPipe returns a pipe that will be connected to the command's + * standard output when the command starts. + * + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. 
+ * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. + * See the example for idiomatic usage. + */ + stdoutPipe(): io.ReadCloser + } + interface Cmd { + /** + * StderrPipe returns a pipe that will be connected to the command's + * standard error when the command starts. + * + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. + * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. + * See the StdoutPipe example for idiomatic usage. + */ + stderrPipe(): io.ReadCloser + } + interface Cmd { + /** + * Environ returns a copy of the environment in which the command would be run + * as it is currently configured. + */ + environ(): Array + } +} + +/** + * Package blob defines a lightweight abstration for interacting with + * various storage services (local filesystem, S3, etc.). + * + * NB! + * For compatibility with earlier PocketBase versions and to prevent + * unnecessary breaking changes, this package is based and implemented + * as a minimal, stripped down version of the previously used gocloud.dev/blob. + * While there is no promise that it won't diverge in the future to accommodate + * better some PocketBase specific use cases, currently it copies and + * tries to follow as close as possible the same implementations, + * conventions and rules for the key escaping/unescaping, blob read/write + * interfaces and struct options as gocloud.dev/blob, therefore the + * credits goes to the original Go Cloud Development Kit Authors. + */ +namespace blob { + /** + * ListObject represents a single blob returned from List. + */ + interface ListObject { + /** + * Key is the key for this blob. + */ + key: string + /** + * ModTime is the time the blob was last modified. + */ + modTime: time.Time + /** + * Size is the size of the blob's content in bytes. + */ + size: number + /** + * MD5 is an MD5 hash of the blob contents or nil if not available. + */ + md5: string|Array + /** + * IsDir indicates that this result represents a "directory" in the + * hierarchical namespace, ending in ListOptions.Delimiter. Key can be + * passed as ListOptions.Prefix to list items in the "directory". + * Fields other than Key and IsDir will not be set if IsDir is true. + */ + isDir: boolean + } + /** + * Attributes contains attributes about a blob. + */ + interface Attributes { + /** + * CacheControl specifies caching attributes that services may use + * when serving the blob. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + */ + cacheControl: string + /** + * ContentDisposition specifies whether the blob content is expected to be + * displayed inline or as an attachment. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + */ + contentDisposition: string + /** + * ContentEncoding specifies the encoding used for the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + */ + contentEncoding: string + /** + * ContentLanguage specifies the language used in the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language + */ + contentLanguage: string + /** + * ContentType is the MIME type of the blob. It will not be empty. 
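A capture sketch for the Cmd declarations above. How the Cmd value is constructed (for example via an $os.cmd()-style factory) is intentionally left out and treated as an assumption; errors from the underlying Go call are expected to surface as thrown exceptions:

```
function runAndCapture(cmd: exec.Cmd): string {
    console.log("running:", cmd.string()) // human-readable description, for debugging only

    // Output() starts the command, waits for it to finish and returns its stdout
    const out = cmd.output()

    return out.toString()
}
```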
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + */ + contentType: string + /** + * Metadata holds key/value pairs associated with the blob. + * Keys are guaranteed to be in lowercase, even if the backend service + * has case-sensitive keys (although note that Metadata written via + * this package will always be lowercased). If there are duplicate + * case-insensitive keys (e.g., "foo" and "FOO"), only one value + * will be kept, and it is undefined which one. + */ + metadata: _TygojaDict + /** + * CreateTime is the time the blob was created, if available. If not available, + * CreateTime will be the zero time. + */ + createTime: time.Time + /** + * ModTime is the time the blob was last modified. + */ + modTime: time.Time + /** + * Size is the size of the blob's content in bytes. + */ + size: number + /** + * MD5 is an MD5 hash of the blob contents or nil if not available. + */ + md5: string|Array + /** + * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. + */ + eTag: string + } + /** + * Reader reads bytes from a blob. + * It implements io.ReadSeekCloser, and must be closed after reads are finished. + */ + interface Reader { + } + interface Reader { + /** + * Read implements io.Reader (https://golang.org/pkg/io/#Reader). + */ + read(p: string|Array): number + } + interface Reader { + /** + * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). + */ + seek(offset: number, whence: number): number + } + interface Reader { + /** + * Close implements io.Closer (https://golang.org/pkg/io/#Closer). + */ + close(): void + } + interface Reader { + /** + * ContentType returns the MIME type of the blob. + */ + contentType(): string + } + interface Reader { + /** + * ModTime returns the time the blob was last modified. + */ + modTime(): time.Time + } + interface Reader { + /** + * Size returns the size of the blob content in bytes. + */ + size(): number + } + interface Reader { + /** + * WriteTo reads from r and writes to w until there's no more data or + * an error occurs. + * The return value is the number of bytes written to w. + * + * It implements the io.WriterTo interface. + */ + writeTo(w: io.Writer): number + } +} + +namespace hook { + /** + * Event implements [Resolver] and it is intended to be used as a base + * Hook event that you can embed in your custom typed event structs. + * + * Example: + * + * ``` + * type CustomEvent struct { + * hook.Event + * + * SomeField int + * } + * ``` + */ + interface Event { + } + interface Event { + /** + * Next calls the next hook handler. + */ + next(): void + } + /** + * Handler defines a single Hook handler. + * Multiple handlers can share the same id. + * If Id is not explicitly set it will be autogenerated by Hook.Add and Hook.AddHandler. + */ + interface Handler { + /** + * Func defines the handler function to execute. + * + * Note that users need to call e.Next() in order to proceed with + * the execution of the hook chain. + */ + func: (_arg0: T) => void + /** + * Id is the unique identifier of the handler. + * + * It could be used later to remove the handler from a hook via [Hook.Remove]. + * + * If missing, an autogenerated value will be assigned when adding + * the handler to a hook. + */ + id: string + /** + * Priority allows changing the default exec priority of the handler within a hook. + * + * If 0, the handler will be executed in the same order it was registered. + */ + priority: number + } + /** + * Hook defines a generic concurrent safe structure for managing event hooks. 
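A consumption sketch for the Reader interface above: it streams the remaining blob content into the provided writer and returns a couple of its attributes; both arguments are assumed to be supplied by the caller (e.g. by a storage driver):

```
function dumpBlob(r: blob.Reader, w: io.Writer): { contentType: string; size: number } {
    try {
        r.writeTo(w) // io.WriterTo-style copy of the blob content

        return { contentType: r.contentType(), size: r.size() }
    } finally {
        r.close() // the reader must always be closed after reads are finished
    }
}
```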
+ * + * When using custom event it must embed the base [hook.Event]. + * + * Example: + * + * ``` + * type CustomEvent struct { + * hook.Event + * SomeField int + * } + * + * h := Hook[*CustomEvent]{} + * + * h.BindFunc(func(e *CustomEvent) error { + * println(e.SomeField) + * + * return e.Next() + * }) + * + * h.Trigger(&CustomEvent{ SomeField: 123 }) + * ``` + */ + interface Hook { + } + /** + * TaggedHook defines a proxy hook which register handlers that are triggered only + * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). + */ + type _sjuIJEF = mainHook + interface TaggedHook extends _sjuIJEF { + } +} + +namespace mailer { + /** + * Message defines a generic email message struct. + */ + interface Message { + from: { address: string; name?: string; } + to: Array<{ address: string; name?: string; }> + bcc: Array<{ address: string; name?: string; }> + cc: Array<{ address: string; name?: string; }> + subject: string + html: string + text: string + headers: _TygojaDict + attachments: _TygojaDict + inlineAttachments: _TygojaDict + } + /** + * Mailer defines a base mail client interface. + */ + interface Mailer { + [key:string]: any; + /** + * Send sends an email with the provided Message. + */ + send(message: Message): void + } +} + +namespace router { + // @ts-ignore + import validation = ozzo_validation + /** + * ApiError defines the struct for a basic api error response. + */ + interface ApiError { + data: _TygojaDict + message: string + status: number + } + interface ApiError { + /** + * Error makes it compatible with the `error` interface. + */ + error(): string + } + interface ApiError { + /** + * RawData returns the unformatted error data (could be an internal error, text, etc.) + */ + rawData(): any + } + interface ApiError { + /** + * Is reports whether the current ApiError wraps the target. + */ + is(target: Error): boolean + } + /** + * Event specifies based Route handler event that is usually intended + * to be embedded as part of a custom event struct. + * + * NB! It is expected that the Response and Request fields are always set. + */ + type _sdEXTFq = hook.Event + interface Event extends _sdEXTFq { + response: http.ResponseWriter + request?: http.Request + } + interface Event { + /** + * Written reports whether the current response has already been written. + * + * This method always returns false if e.ResponseWritter doesn't implement the WriteTracker interface + * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). + */ + written(): boolean + } + interface Event { + /** + * Status reports the status code of the current response. + * + * This method always returns 0 if e.Response doesn't implement the StatusTracker interface + * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). + */ + status(): number + } + interface Event { + /** + * Flush flushes buffered data to the current response. + * + * Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface + * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). + */ + flush(): void + } + interface Event { + /** + * IsTLS reports whether the connection on which the request was received is TLS. + */ + isTLS(): boolean + } + interface Event { + /** + * SetCookie is an alias for [http.SetCookie]. 
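A minimal sending sketch for the mailer declarations above; the Mailer implementation is assumed to be provided by the application and the addresses, subject and body are placeholders:

```
function sendWelcome(client: mailer.Mailer, toAddress: string): void {
    const message: mailer.Message = {
        from: { address: "noreply@example.com", name: "Example App" },
        to: [{ address: toAddress }],
        bcc: [],
        cc: [],
        subject: "Welcome!",
        html: "<p>Thanks for joining.</p>",
        text: "Thanks for joining.",
        headers: {},
        attachments: {},
        inlineAttachments: {},
    }

    client.send(message)
}
```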
+ * + * SetCookie adds a Set-Cookie header to the current response's headers. + * The provided cookie must have a valid Name. + * Invalid cookies may be silently dropped. + */ + setCookie(cookie: http.Cookie): void + } + interface Event { + /** + * RemoteIP returns the IP address of the client that sent the request. + * + * IPv6 addresses are returned expanded. + * For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001". + * + * Note that if you are behind reverse proxy(ies), this method returns + * the IP of the last connecting proxy. + */ + remoteIP(): string + } + interface Event { + /** + * FindUploadedFiles extracts all form files of "key" from a http request + * and returns a slice with filesystem.File instances (if any). + */ + findUploadedFiles(key: string): Array<(filesystem.File | undefined)> + } + interface Event { + /** + * Get retrieves single value from the current event data store. + */ + get(key: string): any + } + interface Event { + /** + * GetAll returns a copy of the current event data store. + */ + getAll(): _TygojaDict + } + interface Event { + /** + * Set saves single value into the current event data store. + */ + set(key: string, value: any): void + } + interface Event { + /** + * SetAll saves all items from m into the current event data store. + */ + setAll(m: _TygojaDict): void + } + interface Event { + /** + * String writes a plain string response. + */ + string(status: number, data: string): void + } + interface Event { + /** + * HTML writes an HTML response. + */ + html(status: number, data: string): void + } + interface Event { + /** + * JSON writes a JSON response. + * + * It also provides a generic response data fields picker if the "fields" query parameter is set. + * For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`, + * it should result in a JSON response like: `{"a":1, "b": 2}`. + */ + json(status: number, data: any): void + } + interface Event { + /** + * XML writes an XML response. + * It automatically prepends the generic [xml.Header] string to the response. + */ + xml(status: number, data: any): void + } + interface Event { + /** + * Stream streams the specified reader into the response. + */ + stream(status: number, contentType: string, reader: io.Reader): void + } + interface Event { + /** + * Blob writes a blob (bytes slice) response. + */ + blob(status: number, contentType: string, b: string|Array): void + } + interface Event { + /** + * FileFS serves the specified filename from fsys. + * + * It is similar to [echo.FileFS] for consistency with earlier versions. + */ + fileFS(fsys: fs.FS, filename: string): void + } + interface Event { + /** + * NoContent writes a response with no body (ex. 204). + */ + noContent(status: number): void + } + interface Event { + /** + * Redirect writes a redirect response to the specified url. + * The status code must be in between 300 – 399 range. 
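A response sketch using the Event helpers above. The route wiring that produces the Event is assumed to exist elsewhere; the handler stores a value in the per-request data store and replies with either a redirect or a JSON payload:

```
function handleHello(e: router.Event): void {
    e.set("handledAt", new Date().toISOString())

    if (e.get("legacy")) {
        // redirect status codes must be in the 300-399 range
        e.redirect(302, "/api/hello-legacy")
        return
    }

    e.json(200, { message: "hello", ip: e.remoteIP() })
}
```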
+ */ + redirect(status: number, url: string): void + } + interface Event { + error(status: number, message: string, errData: any): (ApiError) + } + interface Event { + badRequestError(message: string, errData: any): (ApiError) + } + interface Event { + notFoundError(message: string, errData: any): (ApiError) + } + interface Event { + forbiddenError(message: string, errData: any): (ApiError) + } + interface Event { + unauthorizedError(message: string, errData: any): (ApiError) + } + interface Event { + tooManyRequestsError(message: string, errData: any): (ApiError) + } + interface Event { + internalServerError(message: string, errData: any): (ApiError) + } + interface Event { + /** + * BindBody unmarshal the request body into the provided dst. + * + * dst must be either a struct pointer or map[string]any. + * + * The rules how the body will be scanned depends on the request Content-Type. + * + * Currently the following Content-Types are supported: + * ``` + * - application/json + * - text/xml, application/xml + * - multipart/form-data, application/x-www-form-urlencoded + * ``` + * + * Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type): + * ``` + * - "json" (json body)- uses the builtin Go json package for unmarshaling. + * - "xml" (xml body) - uses the builtin Go xml package for unmarshaling. + * - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method. + * ``` + * + * NB! When dst is a struct make sure that it doesn't have public fields + * that shouldn't be bindable and it is advisible such fields to be unexported + * or have a separate struct just for the binding. For example: + * + * ``` + * data := struct{ + * somethingPrivate string + * + * Title string `json:"title" form:"title"` + * Total int `json:"total" form:"total"` + * } + * err := e.BindBody(&data) + * ``` + */ + bindBody(dst: any): void + } + /** + * Router defines a thin wrapper around the standard Go [http.ServeMux] by + * adding support for routing sub-groups, middlewares and other common utils. + * + * Example: + * + * ``` + * r := NewRouter[*MyEvent](eventFactory) + * + * // middlewares + * r.BindFunc(m1, m2) + * + * // routes + * r.GET("/test", handler1) + * + * // sub-routers/groups + * api := r.Group("/api") + * api.GET("/admins", handler2) + * + * // generate a http.ServeMux instance based on the router configurations + * mux, _ := r.BuildMux() + * + * http.ListenAndServe("localhost:8090", mux) + * ``` + */ + type _sCpGcEg = RouterGroup + interface Router extends _sCpGcEg { + } +} + /** * Package slog provides structured logging, * in which log records include a message, @@ -17844,190 +19647,6 @@ namespace slog { } } -namespace hook { - /** - * Event implements [Resolver] and it is intended to be used as a base - * Hook event that you can embed in your custom typed event structs. - * - * Example: - * - * ``` - * type CustomEvent struct { - * hook.Event - * - * SomeField int - * } - * ``` - */ - interface Event { - } - interface Event { - /** - * Next calls the next hook handler. - */ - next(): void - } - /** - * Handler defines a single Hook handler. - * Multiple handlers can share the same id. - * If Id is not explicitly set it will be autogenerated by Hook.Add and Hook.AddHandler. - */ - interface Handler { - /** - * Func defines the handler function to execute. - * - * Note that users need to call e.Next() in order to proceed with - * the execution of the hook chain. 
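A body-binding sketch for Event.BindBody() above. The DynamicModel constructor is an assumption about the host runtime (declared explicitly below) and the payload fields are illustrative; a scan failure is expected to surface as a thrown error:

```
// assumed host-provided helper for building a bindable, struct-like destination value
declare const DynamicModel: new (shape: Record<string, any>) => any

function handleCreatePost(e: router.Event): void {
    const data = new DynamicModel({
        title: "",
        total: 0,
    })

    // scans the JSON, XML or form request body into data based on its Content-Type
    e.bindBody(data)

    e.json(200, { title: data.title, total: data.total })
}
```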
- */ - func: (_arg0: T) => void - /** - * Id is the unique identifier of the handler. - * - * It could be used later to remove the handler from a hook via [Hook.Remove]. - * - * If missing, an autogenerated value will be assigned when adding - * the handler to a hook. - */ - id: string - /** - * Priority allows changing the default exec priority of the handler within a hook. - * - * If 0, the handler will be executed in the same order it was registered. - */ - priority: number - } - /** - * Hook defines a generic concurrent safe structure for managing event hooks. - * - * When using custom event it must embed the base [hook.Event]. - * - * Example: - * - * ``` - * type CustomEvent struct { - * hook.Event - * SomeField int - * } - * - * h := Hook[*CustomEvent]{} - * - * h.BindFunc(func(e *CustomEvent) error { - * println(e.SomeField) - * - * return e.Next() - * }) - * - * h.Trigger(&CustomEvent{ SomeField: 123 }) - * ``` - */ - interface Hook { - } - /** - * TaggedHook defines a proxy hook which register handlers that are triggered only - * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). - */ - type _smOteQl = mainHook - interface TaggedHook extends _smOteQl { - } -} - -/** - * Package cron implements a crontab-like service to execute and schedule - * repeative tasks/jobs. - * - * Example: - * - * ``` - * c := cron.New() - * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) - * c.Start() - * ``` - */ -namespace cron { - /** - * Cron is a crontab-like struct for tasks/jobs scheduling. - */ - interface Cron { - } - interface Cron { - /** - * SetInterval changes the current cron tick interval - * (it usually should be >= 1 minute). - */ - setInterval(d: time.Duration): void - } - interface Cron { - /** - * SetTimezone changes the current cron tick timezone. - */ - setTimezone(l: time.Location): void - } - interface Cron { - /** - * MustAdd is similar to Add() but panic on failure. - */ - mustAdd(jobId: string, cronExpr: string, run: () => void): void - } - interface Cron { - /** - * Add registers a single cron job. - * - * If there is already a job with the provided id, then the old job - * will be replaced with the new one. - * - * cronExpr is a regular cron expression, eg. "0 *\/3 * * *" (aka. at minute 0 past every 3rd hour). - * Check cron.NewSchedule() for the supported tokens. - */ - add(jobId: string, cronExpr: string, fn: () => void): void - } - interface Cron { - /** - * Remove removes a single cron job by its id. - */ - remove(jobId: string): void - } - interface Cron { - /** - * RemoveAll removes all registered cron jobs. - */ - removeAll(): void - } - interface Cron { - /** - * Total returns the current total number of registered cron jobs. - */ - total(): number - } - interface Cron { - /** - * Jobs returns a shallow copy of the currently registered cron jobs. - */ - jobs(): Array<(Job | undefined)> - } - interface Cron { - /** - * Stop stops the current cron ticker (if not already). - * - * You can resume the ticker by calling Start(). - */ - stop(): void - } - interface Cron { - /** - * Start starts the cron ticker. - * - * Calling Start() on already started cron will restart the ticker. - */ - start(): void - } - interface Cron { - /** - * HasStarted checks whether the current Cron ticker has been started. - */ - hasStarted(): boolean - } -} - /** * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. 
* In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. @@ -19110,1618 +20729,6 @@ namespace cobra { } } -namespace auth { - /** - * Provider defines a common interface for an OAuth2 client. - */ - interface Provider { - [key:string]: any; - /** - * Context returns the context associated with the provider (if any). - */ - context(): context.Context - /** - * SetContext assigns the specified context to the current provider. - */ - setContext(ctx: context.Context): void - /** - * PKCE indicates whether the provider can use the PKCE flow. - */ - pkce(): boolean - /** - * SetPKCE toggles the state whether the provider can use the PKCE flow or not. - */ - setPKCE(enable: boolean): void - /** - * DisplayName usually returns provider name as it is officially written - * and it could be used directly in the UI. - */ - displayName(): string - /** - * SetDisplayName sets the provider's display name. - */ - setDisplayName(displayName: string): void - /** - * Scopes returns the provider access permissions that will be requested. - */ - scopes(): Array - /** - * SetScopes sets the provider access permissions that will be requested later. - */ - setScopes(scopes: Array): void - /** - * ClientId returns the provider client's app ID. - */ - clientId(): string - /** - * SetClientId sets the provider client's ID. - */ - setClientId(clientId: string): void - /** - * ClientSecret returns the provider client's app secret. - */ - clientSecret(): string - /** - * SetClientSecret sets the provider client's app secret. - */ - setClientSecret(secret: string): void - /** - * RedirectURL returns the end address to redirect the user - * going through the OAuth flow. - */ - redirectURL(): string - /** - * SetRedirectURL sets the provider's RedirectURL. - */ - setRedirectURL(url: string): void - /** - * AuthURL returns the provider's authorization service url. - */ - authURL(): string - /** - * SetAuthURL sets the provider's AuthURL. - */ - setAuthURL(url: string): void - /** - * TokenURL returns the provider's token exchange service url. - */ - tokenURL(): string - /** - * SetTokenURL sets the provider's TokenURL. - */ - setTokenURL(url: string): void - /** - * UserInfoURL returns the provider's user info api url. - */ - userInfoURL(): string - /** - * SetUserInfoURL sets the provider's UserInfoURL. - */ - setUserInfoURL(url: string): void - /** - * Extra returns a shallow copy of any custom config data - * that the provider may be need. - */ - extra(): _TygojaDict - /** - * SetExtra updates the provider's custom config data. - */ - setExtra(data: _TygojaDict): void - /** - * Client returns an http client using the provided token. - */ - client(token: oauth2.Token): (any) - /** - * BuildAuthURL returns a URL to the provider's consent page - * that asks for permissions for the required scopes explicitly. - */ - buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string - /** - * FetchToken converts an authorization code to token. - */ - fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token) - /** - * FetchRawUserInfo requests and marshalizes into `result` the - * the OAuth user api response. - */ - fetchRawUserInfo(token: oauth2.Token): string|Array - /** - * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and - * marshalizes the user api response into a standardized AuthUser struct. - */ - fetchAuthUser(token: oauth2.Token): (AuthUser) - } - /** - * AuthUser defines a standardized OAuth2 user data structure. 
- */ - interface AuthUser { - expiry: types.DateTime - rawUser: _TygojaDict - id: string - name: string - username: string - email: string - avatarURL: string - accessToken: string - refreshToken: string - /** - * @todo - * deprecated: use AvatarURL instead - * AvatarUrl will be removed after dropping v0.22 support - */ - avatarUrl: string - } - interface AuthUser { - /** - * MarshalJSON implements the [json.Marshaler] interface. - * - * @todo remove after dropping v0.22 support - */ - marshalJSON(): string|Array - } -} - -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { - /** - * TxOptions holds the transaction options to be used in [DB.BeginTx]. - */ - interface TxOptions { - /** - * Isolation is the transaction isolation level. - * If zero, the driver or database's default level is used. - */ - isolation: IsolationLevel - readOnly: boolean - } - /** - * NullString represents a string that may be null. - * NullString implements the [Scanner] interface so - * it can be used as a scan destination: - * - * ``` - * var s NullString - * err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s) - * ... - * if s.Valid { - * // use s.String - * } else { - * // NULL value - * } - * ``` - */ - interface NullString { - string: string - valid: boolean // Valid is true if String is not NULL - } - interface NullString { - /** - * Scan implements the [Scanner] interface. - */ - scan(value: any): void - } - interface NullString { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - /** - * DB is a database handle representing a pool of zero or more - * underlying connections. It's safe for concurrent use by multiple - * goroutines. - * - * The sql package creates and frees connections automatically; it - * also maintains a free pool of idle connections. If the database has - * a concept of per-connection state, such state can be reliably observed - * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the - * returned [Tx] is bound to a single connection. Once [Tx.Commit] or - * [Tx.Rollback] is called on the transaction, that transaction's - * connection is returned to [DB]'s idle connection pool. The pool size - * can be controlled with [DB.SetMaxIdleConns]. - */ - interface DB { - } - interface DB { - /** - * PingContext verifies a connection to the database is still alive, - * establishing a connection if necessary. - */ - pingContext(ctx: context.Context): void - } - interface DB { - /** - * Ping verifies a connection to the database is still alive, - * establishing a connection if necessary. - * - * Ping uses [context.Background] internally; to specify the context, use - * [DB.PingContext]. - */ - ping(): void - } - interface DB { - /** - * Close closes the database and prevents new queries from starting. - * Close then waits for all queries that have started processing on the server - * to finish. - * - * It is rare to Close a [DB], as the [DB] handle is meant to be - * long-lived and shared between many goroutines. 
- */ - close(): void - } - interface DB { - /** - * SetMaxIdleConns sets the maximum number of connections in the idle - * connection pool. - * - * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, - * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit. - * - * If n <= 0, no idle connections are retained. - * - * The default max idle connections is currently 2. This may change in - * a future release. - */ - setMaxIdleConns(n: number): void - } - interface DB { - /** - * SetMaxOpenConns sets the maximum number of open connections to the database. - * - * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than - * MaxIdleConns, then MaxIdleConns will be reduced to match the new - * MaxOpenConns limit. - * - * If n <= 0, then there is no limit on the number of open connections. - * The default is 0 (unlimited). - */ - setMaxOpenConns(n: number): void - } - interface DB { - /** - * SetConnMaxLifetime sets the maximum amount of time a connection may be reused. - * - * Expired connections may be closed lazily before reuse. - * - * If d <= 0, connections are not closed due to a connection's age. - */ - setConnMaxLifetime(d: time.Duration): void - } - interface DB { - /** - * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle. - * - * Expired connections may be closed lazily before reuse. - * - * If d <= 0, connections are not closed due to a connection's idle time. - */ - setConnMaxIdleTime(d: time.Duration): void - } - interface DB { - /** - * Stats returns database statistics. - */ - stats(): DBStats - } - interface DB { - /** - * PrepareContext creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface DB { - /** - * Prepare creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * Prepare uses [context.Background] internally; to specify the context, use - * [DB.PrepareContext]. - */ - prepare(query: string): (Stmt) - } - interface DB { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface DB { - /** - * Exec executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - * - * Exec uses [context.Background] internally; to specify the context, use - * [DB.ExecContext]. - */ - exec(query: string, ...args: any[]): Result - } - interface DB { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface DB { - /** - * Query executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. 
- * - * Query uses [context.Background] internally; to specify the context, use - * [DB.QueryContext]. - */ - query(query: string, ...args: any[]): (Rows) - } - interface DB { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface DB { - /** - * QueryRow executes a query that is expected to return at most one row. - * QueryRow always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, [*Row.Scan] scans the first selected row and discards - * the rest. - * - * QueryRow uses [context.Background] internally; to specify the context, use - * [DB.QueryRowContext]. - */ - queryRow(query: string, ...args: any[]): (Row) - } - interface DB { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. - */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) - } - interface DB { - /** - * Begin starts a transaction. The default isolation level is dependent on - * the driver. - * - * Begin uses [context.Background] internally; to specify the context, use - * [DB.BeginTx]. - */ - begin(): (Tx) - } - interface DB { - /** - * Driver returns the database's underlying driver. - */ - driver(): any - } - interface DB { - /** - * Conn returns a single connection by either opening a new connection - * or returning an existing connection from the connection pool. Conn will - * block until either a connection is returned or ctx is canceled. - * Queries run on the same Conn will be run in the same database session. - * - * Every Conn must be returned to the database pool after use by - * calling [Conn.Close]. - */ - conn(ctx: context.Context): (Conn) - } - /** - * Tx is an in-progress database transaction. - * - * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback]. - * - * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the - * transaction fail with [ErrTxDone]. - * - * The statements prepared for a transaction by calling - * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed - * by the call to [Tx.Commit] or [Tx.Rollback]. - */ - interface Tx { - } - interface Tx { - /** - * Commit commits the transaction. - */ - commit(): void - } - interface Tx { - /** - * Rollback aborts the transaction. - */ - rollback(): void - } - interface Tx { - /** - * PrepareContext creates a prepared statement for use within a transaction. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * To use an existing prepared statement on this transaction, see [Tx.Stmt]. 
- * - * The provided context will be used for the preparation of the context, not - * for the execution of the returned statement. The returned statement - * will run in the transaction context. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface Tx { - /** - * Prepare creates a prepared statement for use within a transaction. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * To use an existing prepared statement on this transaction, see [Tx.Stmt]. - * - * Prepare uses [context.Background] internally; to specify the context, use - * [Tx.PrepareContext]. - */ - prepare(query: string): (Stmt) - } - interface Tx { - /** - * StmtContext returns a transaction-specific prepared statement from - * an existing statement. - * - * Example: - * - * ``` - * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") - * ... - * tx, err := db.Begin() - * ... - * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203) - * ``` - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - */ - stmtContext(ctx: context.Context, stmt: Stmt): (Stmt) - } - interface Tx { - /** - * Stmt returns a transaction-specific prepared statement from - * an existing statement. - * - * Example: - * - * ``` - * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") - * ... - * tx, err := db.Begin() - * ... - * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203) - * ``` - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * Stmt uses [context.Background] internally; to specify the context, use - * [Tx.StmtContext]. - */ - stmt(stmt: Stmt): (Stmt) - } - interface Tx { - /** - * ExecContext executes a query that doesn't return rows. - * For example: an INSERT and UPDATE. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Tx { - /** - * Exec executes a query that doesn't return rows. - * For example: an INSERT and UPDATE. - * - * Exec uses [context.Background] internally; to specify the context, use - * [Tx.ExecContext]. - */ - exec(query: string, ...args: any[]): Result - } - interface Tx { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Tx { - /** - * Query executes a query that returns rows, typically a SELECT. - * - * Query uses [context.Background] internally; to specify the context, use - * [Tx.QueryContext]. - */ - query(query: string, ...args: any[]): (Rows) - } - interface Tx { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Tx { - /** - * QueryRow executes a query that is expected to return at most one row. - * QueryRow always returns a non-nil value. 
Errors are deferred until - * [Row]'s Scan method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - * - * QueryRow uses [context.Background] internally; to specify the context, use - * [Tx.QueryRowContext]. - */ - queryRow(query: string, ...args: any[]): (Row) - } - /** - * Stmt is a prepared statement. - * A Stmt is safe for concurrent use by multiple goroutines. - * - * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single - * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will - * become unusable and all operations will return an error. - * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the - * [DB]. When the Stmt needs to execute on a new underlying connection, it will - * prepare itself on the new connection automatically. - */ - interface Stmt { - } - interface Stmt { - /** - * ExecContext executes a prepared statement with the given arguments and - * returns a [Result] summarizing the effect of the statement. - */ - execContext(ctx: context.Context, ...args: any[]): Result - } - interface Stmt { - /** - * Exec executes a prepared statement with the given arguments and - * returns a [Result] summarizing the effect of the statement. - * - * Exec uses [context.Background] internally; to specify the context, use - * [Stmt.ExecContext]. - */ - exec(...args: any[]): Result - } - interface Stmt { - /** - * QueryContext executes a prepared query statement with the given arguments - * and returns the query results as a [*Rows]. - */ - queryContext(ctx: context.Context, ...args: any[]): (Rows) - } - interface Stmt { - /** - * Query executes a prepared query statement with the given arguments - * and returns the query results as a *Rows. - * - * Query uses [context.Background] internally; to specify the context, use - * [Stmt.QueryContext]. - */ - query(...args: any[]): (Rows) - } - interface Stmt { - /** - * QueryRowContext executes a prepared query statement with the given arguments. - * If an error occurs during the execution of the statement, that error will - * be returned by a call to Scan on the returned [*Row], which is always non-nil. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, ...args: any[]): (Row) - } - interface Stmt { - /** - * QueryRow executes a prepared query statement with the given arguments. - * If an error occurs during the execution of the statement, that error will - * be returned by a call to Scan on the returned [*Row], which is always non-nil. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - * - * Example usage: - * - * ``` - * var name string - * err := nameByUseridStmt.QueryRow(id).Scan(&name) - * ``` - * - * QueryRow uses [context.Background] internally; to specify the context, use - * [Stmt.QueryRowContext]. - */ - queryRow(...args: any[]): (Row) - } - interface Stmt { - /** - * Close closes the statement. - */ - close(): void - } - /** - * Rows is the result of a query. Its cursor starts before the first row - * of the result set. Use [Rows.Next] to advance from row to row. 
- */ - interface Rows { - } - interface Rows { - /** - * Next prepares the next result row for reading with the [Rows.Scan] method. It - * returns true on success, or false if there is no next result row or an error - * happened while preparing it. [Rows.Err] should be consulted to distinguish between - * the two cases. - * - * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next]. - */ - next(): boolean - } - interface Rows { - /** - * NextResultSet prepares the next result set for reading. It reports whether - * there is further result sets, or false if there is no further result set - * or if there is an error advancing to it. The [Rows.Err] method should be consulted - * to distinguish between the two cases. - * - * After calling NextResultSet, the [Rows.Next] method should always be called before - * scanning. If there are further result sets they may not have rows in the result - * set. - */ - nextResultSet(): boolean - } - interface Rows { - /** - * Err returns the error, if any, that was encountered during iteration. - * Err may be called after an explicit or implicit [Rows.Close]. - */ - err(): void - } - interface Rows { - /** - * Columns returns the column names. - * Columns returns an error if the rows are closed. - */ - columns(): Array - } - interface Rows { - /** - * ColumnTypes returns column information such as column type, length, - * and nullable. Some information may not be available from some drivers. - */ - columnTypes(): Array<(ColumnType | undefined)> - } - interface Rows { - /** - * Scan copies the columns in the current row into the values pointed - * at by dest. The number of values in dest must be the same as the - * number of columns in [Rows]. - * - * Scan converts columns read from the database into the following - * common Go types and special types provided by the sql package: - * - * ``` - * *string - * *[]byte - * *int, *int8, *int16, *int32, *int64 - * *uint, *uint8, *uint16, *uint32, *uint64 - * *bool - * *float32, *float64 - * *interface{} - * *RawBytes - * *Rows (cursor value) - * any type implementing Scanner (see Scanner docs) - * ``` - * - * In the most simple case, if the type of the value from the source - * column is an integer, bool or string type T and dest is of type *T, - * Scan simply assigns the value through the pointer. - * - * Scan also converts between string and numeric types, as long as no - * information would be lost. While Scan stringifies all numbers - * scanned from numeric database columns into *string, scans into - * numeric types are checked for overflow. For example, a float64 with - * value 300 or a string with value "300" can scan into a uint16, but - * not into a uint8, though float64(255) or "255" can scan into a - * uint8. One exception is that scans of some float64 numbers to - * strings may lose information when stringifying. In general, scan - * floating point columns into *float64. - * - * If a dest argument has type *[]byte, Scan saves in that argument a - * copy of the corresponding data. The copy is owned by the caller and - * can be modified and held indefinitely. The copy can be avoided by - * using an argument of type [*RawBytes] instead; see the documentation - * for [RawBytes] for restrictions on its use. - * - * If an argument has type *interface{}, Scan copies the value - * provided by the underlying driver without conversion. When scanning - * from a source value of type []byte to *interface{}, a copy of the - * slice is made and the caller owns the result. 
- * - * Source values of type [time.Time] may be scanned into values of type - * *time.Time, *interface{}, *string, or *[]byte. When converting to - * the latter two, [time.RFC3339Nano] is used. - * - * Source values of type bool may be scanned into types *bool, - * *interface{}, *string, *[]byte, or [*RawBytes]. - * - * For scanning into *bool, the source may be true, false, 1, 0, or - * string inputs parseable by [strconv.ParseBool]. - * - * Scan can also convert a cursor returned from a query, such as - * "select cursor(select * from my_table) from dual", into a - * [*Rows] value that can itself be scanned from. The parent - * select query will close any cursor [*Rows] if the parent [*Rows] is closed. - * - * If any of the first arguments implementing [Scanner] returns an error, - * that error will be wrapped in the returned error. - */ - scan(...dest: any[]): void - } - interface Rows { - /** - * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called - * and returns false and there are no further result sets, - * the [Rows] are closed automatically and it will suffice to check the - * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err]. - */ - close(): void - } - /** - * A Result summarizes an executed SQL command. - */ - interface Result { - [key:string]: any; - /** - * LastInsertId returns the integer generated by the database - * in response to a command. Typically this will be from an - * "auto increment" column when inserting a new row. Not all - * databases support this feature, and the syntax of such - * statements varies. - */ - lastInsertId(): number - /** - * RowsAffected returns the number of rows affected by an - * update, insert, or delete. Not every database or database - * driver may support this. - */ - rowsAffected(): number - } -} - -namespace search { - /** - * Result defines the returned search result structure. - */ - interface Result { - items: any - page: number - perPage: number - totalItems: number - totalPages: number - } - /** - * ResolverResult defines a single FieldResolver.Resolve() successfully parsed result. - */ - interface ResolverResult { - /** - * Identifier is the plain SQL identifier/column that will be used - * in the final db expression as left or right operand. - */ - identifier: string - /** - * NoCoalesce instructs to not use COALESCE or NULL fallbacks - * when building the identifier expression. - */ - noCoalesce: boolean - /** - * Params is a map with db placeholder->value pairs that will be added - * to the query when building both resolved operands/sides in a single expression. - */ - params: dbx.Params - /** - * MultiMatchSubQuery is an optional sub query expression that will be added - * in addition to the combined ResolverResult expression during build. - */ - multiMatchSubQuery: dbx.Expression - /** - * AfterBuild is an optional function that will be called after building - * and combining the result of both resolved operands/sides in a single expression. - */ - afterBuild: (expr: dbx.Expression) => dbx.Expression - } -} - -/** - * Package blob defines a lightweight abstration for interacting with - * various storage services (local filesystem, S3, etc.). - * - * NB! - * For compatibility with earlier PocketBase versions and to prevent - * unnecessary breaking changes, this package is based and implemented - * as a minimal, stripped down version of the previously used gocloud.dev/blob. 
- * While there is no promise that it won't diverge in the future to accommodate - * better some PocketBase specific use cases, currently it copies and - * tries to follow as close as possible the same implementations, - * conventions and rules for the key escaping/unescaping, blob read/write - * interfaces and struct options as gocloud.dev/blob, therefore the - * credits goes to the original Go Cloud Development Kit Authors. - */ -namespace blob { - /** - * ListObject represents a single blob returned from List. - */ - interface ListObject { - /** - * Key is the key for this blob. - */ - key: string - /** - * ModTime is the time the blob was last modified. - */ - modTime: time.Time - /** - * Size is the size of the blob's content in bytes. - */ - size: number - /** - * MD5 is an MD5 hash of the blob contents or nil if not available. - */ - md5: string|Array - /** - * IsDir indicates that this result represents a "directory" in the - * hierarchical namespace, ending in ListOptions.Delimiter. Key can be - * passed as ListOptions.Prefix to list items in the "directory". - * Fields other than Key and IsDir will not be set if IsDir is true. - */ - isDir: boolean - } - /** - * Attributes contains attributes about a blob. - */ - interface Attributes { - /** - * CacheControl specifies caching attributes that services may use - * when serving the blob. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control - */ - cacheControl: string - /** - * ContentDisposition specifies whether the blob content is expected to be - * displayed inline or as an attachment. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition - */ - contentDisposition: string - /** - * ContentEncoding specifies the encoding used for the blob's content, if any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding - */ - contentEncoding: string - /** - * ContentLanguage specifies the language used in the blob's content, if any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language - */ - contentLanguage: string - /** - * ContentType is the MIME type of the blob. It will not be empty. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type - */ - contentType: string - /** - * Metadata holds key/value pairs associated with the blob. - * Keys are guaranteed to be in lowercase, even if the backend service - * has case-sensitive keys (although note that Metadata written via - * this package will always be lowercased). If there are duplicate - * case-insensitive keys (e.g., "foo" and "FOO"), only one value - * will be kept, and it is undefined which one. - */ - metadata: _TygojaDict - /** - * CreateTime is the time the blob was created, if available. If not available, - * CreateTime will be the zero time. - */ - createTime: time.Time - /** - * ModTime is the time the blob was last modified. - */ - modTime: time.Time - /** - * Size is the size of the blob's content in bytes. - */ - size: number - /** - * MD5 is an MD5 hash of the blob contents or nil if not available. - */ - md5: string|Array - /** - * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. - */ - eTag: string - } - /** - * Reader reads bytes from a blob. - * It implements io.ReadSeekCloser, and must be closed after reads are finished. - */ - interface Reader { - } - interface Reader { - /** - * Read implements io.Reader (https://golang.org/pkg/io/#Reader). 
- */ - read(p: string|Array): number - } - interface Reader { - /** - * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). - */ - seek(offset: number, whence: number): number - } - interface Reader { - /** - * Close implements io.Closer (https://golang.org/pkg/io/#Closer). - */ - close(): void - } - interface Reader { - /** - * ContentType returns the MIME type of the blob. - */ - contentType(): string - } - interface Reader { - /** - * ModTime returns the time the blob was last modified. - */ - modTime(): time.Time - } - interface Reader { - /** - * Size returns the size of the blob content in bytes. - */ - size(): number - } - interface Reader { - /** - * WriteTo reads from r and writes to w until there's no more data or - * an error occurs. - * The return value is the number of bytes written to w. - * - * It implements the io.WriterTo interface. - */ - writeTo(w: io.Writer): number - } -} - -namespace exec { - /** - * Cmd represents an external command being prepared or run. - * - * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] - * methods. - */ - interface Cmd { - /** - * Path is the path of the command to run. - * - * This is the only field that must be set to a non-zero - * value. If Path is relative, it is evaluated relative - * to Dir. - */ - path: string - /** - * Args holds command line arguments, including the command as Args[0]. - * If the Args field is empty or nil, Run uses {Path}. - * - * In typical use, both Path and Args are set by calling Command. - */ - args: Array - /** - * Env specifies the environment of the process. - * Each entry is of the form "key=value". - * If Env is nil, the new process uses the current process's - * environment. - * If Env contains duplicate environment keys, only the last - * value in the slice for each duplicate key is used. - * As a special case on Windows, SYSTEMROOT is always added if - * missing and not explicitly set to the empty string. - */ - env: Array - /** - * Dir specifies the working directory of the command. - * If Dir is the empty string, Run runs the command in the - * calling process's current directory. - */ - dir: string - /** - * Stdin specifies the process's standard input. - * - * If Stdin is nil, the process reads from the null device (os.DevNull). - * - * If Stdin is an *os.File, the process's standard input is connected - * directly to that file. - * - * Otherwise, during the execution of the command a separate - * goroutine reads from Stdin and delivers that data to the command - * over a pipe. In this case, Wait does not complete until the goroutine - * stops copying, either because it has reached the end of Stdin - * (EOF or a read error), or because writing to the pipe returned an error, - * or because a nonzero WaitDelay was set and expired. - */ - stdin: io.Reader - /** - * Stdout and Stderr specify the process's standard output and error. - * - * If either is nil, Run connects the corresponding file descriptor - * to the null device (os.DevNull). - * - * If either is an *os.File, the corresponding output from the process - * is connected directly to that file. - * - * Otherwise, during the execution of the command a separate goroutine - * reads from the process over a pipe and delivers that data to the - * corresponding Writer. In this case, Wait does not complete until the - * goroutine reaches EOF or encounters an error or a nonzero WaitDelay - * expires. 
- * - * If Stdout and Stderr are the same writer, and have a type that can - * be compared with ==, at most one goroutine at a time will call Write. - */ - stdout: io.Writer - stderr: io.Writer - /** - * ExtraFiles specifies additional open files to be inherited by the - * new process. It does not include standard input, standard output, or - * standard error. If non-nil, entry i becomes file descriptor 3+i. - * - * ExtraFiles is not supported on Windows. - */ - extraFiles: Array<(os.File | undefined)> - /** - * SysProcAttr holds optional, operating system-specific attributes. - * Run passes it to os.StartProcess as the os.ProcAttr's Sys field. - */ - sysProcAttr?: syscall.SysProcAttr - /** - * Process is the underlying process, once started. - */ - process?: os.Process - /** - * ProcessState contains information about an exited process. - * If the process was started successfully, Wait or Run will - * populate its ProcessState when the command completes. - */ - processState?: os.ProcessState - err: Error // LookPath error, if any. - /** - * If Cancel is non-nil, the command must have been created with - * CommandContext and Cancel will be called when the command's - * Context is done. By default, CommandContext sets Cancel to - * call the Kill method on the command's Process. - * - * Typically a custom Cancel will send a signal to the command's - * Process, but it may instead take other actions to initiate cancellation, - * such as closing a stdin or stdout pipe or sending a shutdown request on a - * network socket. - * - * If the command exits with a success status after Cancel is - * called, and Cancel does not return an error equivalent to - * os.ErrProcessDone, then Wait and similar methods will return a non-nil - * error: either an error wrapping the one returned by Cancel, - * or the error from the Context. - * (If the command exits with a non-success status, or Cancel - * returns an error that wraps os.ErrProcessDone, Wait and similar methods - * continue to return the command's usual exit status.) - * - * If Cancel is set to nil, nothing will happen immediately when the command's - * Context is done, but a nonzero WaitDelay will still take effect. That may - * be useful, for example, to work around deadlocks in commands that do not - * support shutdown signals but are expected to always finish quickly. - * - * Cancel will not be called if Start returns a non-nil error. - */ - cancel: () => void - /** - * If WaitDelay is non-zero, it bounds the time spent waiting on two sources - * of unexpected delay in Wait: a child process that fails to exit after the - * associated Context is canceled, and a child process that exits but leaves - * its I/O pipes unclosed. - * - * The WaitDelay timer starts when either the associated Context is done or a - * call to Wait observes that the child process has exited, whichever occurs - * first. When the delay has elapsed, the command shuts down the child process - * and/or its I/O pipes. - * - * If the child process has failed to exit — perhaps because it ignored or - * failed to receive a shutdown signal from a Cancel function, or because no - * Cancel function was set — then it will be terminated using os.Process.Kill. - * - * Then, if the I/O pipes communicating with the child process are still open, - * those pipes are closed in order to unblock any goroutines currently blocked - * on Read or Write calls. 
- * - * If pipes are closed due to WaitDelay, no Cancel call has occurred, - * and the command has otherwise exited with a successful status, Wait and - * similar methods will return ErrWaitDelay instead of nil. - * - * If WaitDelay is zero (the default), I/O pipes will be read until EOF, - * which might not occur until orphaned subprocesses of the command have - * also closed their descriptors for the pipes. - */ - waitDelay: time.Duration - } - interface Cmd { - /** - * String returns a human-readable description of c. - * It is intended only for debugging. - * In particular, it is not suitable for use as input to a shell. - * The output of String may vary across Go releases. - */ - string(): string - } - interface Cmd { - /** - * Run starts the specified command and waits for it to complete. - * - * The returned error is nil if the command runs, has no problems - * copying stdin, stdout, and stderr, and exits with a zero exit - * status. - * - * If the command starts but does not complete successfully, the error is of - * type [*ExitError]. Other error types may be returned for other situations. - * - * If the calling goroutine has locked the operating system thread - * with [runtime.LockOSThread] and modified any inheritable OS-level - * thread state (for example, Linux or Plan 9 name spaces), the new - * process will inherit the caller's thread state. - */ - run(): void - } - interface Cmd { - /** - * Start starts the specified command but does not wait for it to complete. - * - * If Start returns successfully, the c.Process field will be set. - * - * After a successful call to Start the [Cmd.Wait] method must be called in - * order to release associated system resources. - */ - start(): void - } - interface Cmd { - /** - * Wait waits for the command to exit and waits for any copying to - * stdin or copying from stdout or stderr to complete. - * - * The command must have been started by [Cmd.Start]. - * - * The returned error is nil if the command runs, has no problems - * copying stdin, stdout, and stderr, and exits with a zero exit - * status. - * - * If the command fails to run or doesn't complete successfully, the - * error is of type [*ExitError]. Other error types may be - * returned for I/O problems. - * - * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits - * for the respective I/O loop copying to or from the process to complete. - * - * Wait releases any resources associated with the [Cmd]. - */ - wait(): void - } - interface Cmd { - /** - * Output runs the command and returns its standard output. - * Any returned error will usually be of type [*ExitError]. - * If c.Stderr was nil, Output populates [ExitError.Stderr]. - */ - output(): string|Array - } - interface Cmd { - /** - * CombinedOutput runs the command and returns its combined standard - * output and standard error. - */ - combinedOutput(): string|Array - } - interface Cmd { - /** - * StdinPipe returns a pipe that will be connected to the command's - * standard input when the command starts. - * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. - * A caller need only call Close to force the pipe to close sooner. - * For example, if the command being run will not exit until standard input - * is closed, the caller must close the pipe. - */ - stdinPipe(): io.WriteCloser - } - interface Cmd { - /** - * StdoutPipe returns a pipe that will be connected to the command's - * standard output when the command starts. 
- * - * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. - * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. - * See the example for idiomatic usage. - */ - stdoutPipe(): io.ReadCloser - } - interface Cmd { - /** - * StderrPipe returns a pipe that will be connected to the command's - * standard error when the command starts. - * - * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. - * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. - * See the StdoutPipe example for idiomatic usage. - */ - stderrPipe(): io.ReadCloser - } - interface Cmd { - /** - * Environ returns a copy of the environment in which the command would be run - * as it is currently configured. - */ - environ(): Array - } -} - -namespace mailer { - /** - * Message defines a generic email message struct. - */ - interface Message { - from: { address: string; name?: string; } - to: Array<{ address: string; name?: string; }> - bcc: Array<{ address: string; name?: string; }> - cc: Array<{ address: string; name?: string; }> - subject: string - html: string - text: string - headers: _TygojaDict - attachments: _TygojaDict - inlineAttachments: _TygojaDict - } - /** - * Mailer defines a base mail client interface. - */ - interface Mailer { - [key:string]: any; - /** - * Send sends an email with the provided Message. - */ - send(message: Message): void - } -} - -namespace router { - // @ts-ignore - import validation = ozzo_validation - /** - * ApiError defines the struct for a basic api error response. - */ - interface ApiError { - data: _TygojaDict - message: string - status: number - } - interface ApiError { - /** - * Error makes it compatible with the `error` interface. - */ - error(): string - } - interface ApiError { - /** - * RawData returns the unformatted error data (could be an internal error, text, etc.) - */ - rawData(): any - } - interface ApiError { - /** - * Is reports whether the current ApiError wraps the target. - */ - is(target: Error): boolean - } - /** - * Event specifies based Route handler event that is usually intended - * to be embedded as part of a custom event struct. - * - * NB! It is expected that the Response and Request fields are always set. - */ - type _sRVgVta = hook.Event - interface Event extends _sRVgVta { - response: http.ResponseWriter - request?: http.Request - } - interface Event { - /** - * Written reports whether the current response has already been written. - * - * This method always returns false if e.ResponseWritter doesn't implement the WriteTracker interface - * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). - */ - written(): boolean - } - interface Event { - /** - * Status reports the status code of the current response. - * - * This method always returns 0 if e.Response doesn't implement the StatusTracker interface - * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). - */ - status(): number - } - interface Event { - /** - * Flush flushes buffered data to the current response. 
- * - * Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface - * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). - */ - flush(): void - } - interface Event { - /** - * IsTLS reports whether the connection on which the request was received is TLS. - */ - isTLS(): boolean - } - interface Event { - /** - * SetCookie is an alias for [http.SetCookie]. - * - * SetCookie adds a Set-Cookie header to the current response's headers. - * The provided cookie must have a valid Name. - * Invalid cookies may be silently dropped. - */ - setCookie(cookie: http.Cookie): void - } - interface Event { - /** - * RemoteIP returns the IP address of the client that sent the request. - * - * IPv6 addresses are returned expanded. - * For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001". - * - * Note that if you are behind reverse proxy(ies), this method returns - * the IP of the last connecting proxy. - */ - remoteIP(): string - } - interface Event { - /** - * FindUploadedFiles extracts all form files of "key" from a http request - * and returns a slice with filesystem.File instances (if any). - */ - findUploadedFiles(key: string): Array<(filesystem.File | undefined)> - } - interface Event { - /** - * Get retrieves single value from the current event data store. - */ - get(key: string): any - } - interface Event { - /** - * GetAll returns a copy of the current event data store. - */ - getAll(): _TygojaDict - } - interface Event { - /** - * Set saves single value into the current event data store. - */ - set(key: string, value: any): void - } - interface Event { - /** - * SetAll saves all items from m into the current event data store. - */ - setAll(m: _TygojaDict): void - } - interface Event { - /** - * String writes a plain string response. - */ - string(status: number, data: string): void - } - interface Event { - /** - * HTML writes an HTML response. - */ - html(status: number, data: string): void - } - interface Event { - /** - * JSON writes a JSON response. - * - * It also provides a generic response data fields picker if the "fields" query parameter is set. - * For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`, - * it should result in a JSON response like: `{"a":1, "b": 2}`. - */ - json(status: number, data: any): void - } - interface Event { - /** - * XML writes an XML response. - * It automatically prepends the generic [xml.Header] string to the response. - */ - xml(status: number, data: any): void - } - interface Event { - /** - * Stream streams the specified reader into the response. - */ - stream(status: number, contentType: string, reader: io.Reader): void - } - interface Event { - /** - * Blob writes a blob (bytes slice) response. - */ - blob(status: number, contentType: string, b: string|Array): void - } - interface Event { - /** - * FileFS serves the specified filename from fsys. - * - * It is similar to [echo.FileFS] for consistency with earlier versions. - */ - fileFS(fsys: fs.FS, filename: string): void - } - interface Event { - /** - * NoContent writes a response with no body (ex. 204). - */ - noContent(status: number): void - } - interface Event { - /** - * Redirect writes a redirect response to the specified url. - * The status code must be in between 300 – 399 range. 
- */ - redirect(status: number, url: string): void - } - interface Event { - error(status: number, message: string, errData: any): (ApiError) - } - interface Event { - badRequestError(message: string, errData: any): (ApiError) - } - interface Event { - notFoundError(message: string, errData: any): (ApiError) - } - interface Event { - forbiddenError(message: string, errData: any): (ApiError) - } - interface Event { - unauthorizedError(message: string, errData: any): (ApiError) - } - interface Event { - tooManyRequestsError(message: string, errData: any): (ApiError) - } - interface Event { - internalServerError(message: string, errData: any): (ApiError) - } - interface Event { - /** - * BindBody unmarshal the request body into the provided dst. - * - * dst must be either a struct pointer or map[string]any. - * - * The rules how the body will be scanned depends on the request Content-Type. - * - * Currently the following Content-Types are supported: - * ``` - * - application/json - * - text/xml, application/xml - * - multipart/form-data, application/x-www-form-urlencoded - * ``` - * - * Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type): - * ``` - * - "json" (json body)- uses the builtin Go json package for unmarshaling. - * - "xml" (xml body) - uses the builtin Go xml package for unmarshaling. - * - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method. - * ``` - * - * NB! When dst is a struct make sure that it doesn't have public fields - * that shouldn't be bindable and it is advisible such fields to be unexported - * or have a separate struct just for the binding. For example: - * - * ``` - * data := struct{ - * somethingPrivate string - * - * Title string `json:"title" form:"title"` - * Total int `json:"total" form:"total"` - * } - * err := e.BindBody(&data) - * ``` - */ - bindBody(dst: any): void - } - /** - * Router defines a thin wrapper around the standard Go [http.ServeMux] by - * adding support for routing sub-groups, middlewares and other common utils. - * - * Example: - * - * ``` - * r := NewRouter[*MyEvent](eventFactory) - * - * // middlewares - * r.BindFunc(m1, m2) - * - * // routes - * r.GET("/test", handler1) - * - * // sub-routers/groups - * api := r.Group("/api") - * api.GET("/admins", handler2) - * - * // generate a http.ServeMux instance based on the router configurations - * mux, _ := r.BuildMux() - * - * http.ListenAndServe("localhost:8090", mux) - * ``` - */ - type _sZfkvlc = RouterGroup - interface Router extends _sZfkvlc { - } -} - namespace sync { /** * A Locker represents an object that can be locked and unlocked. @@ -20834,266 +20841,6 @@ namespace store { namespace context { } -namespace bufio { - /** - * Reader implements buffering for an io.Reader object. - */ - interface Reader { - } - interface Reader { - /** - * Size returns the size of the underlying buffer in bytes. - */ - size(): number - } - interface Reader { - /** - * Reset discards any buffered data, resets all state, and switches - * the buffered reader to read from r. - * Calling Reset on the zero value of [Reader] initializes the internal buffer - * to the default size. - * Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing. - */ - reset(r: io.Reader): void - } - interface Reader { - /** - * Peek returns the next n bytes without advancing the reader. The bytes stop - * being valid at the next read call. 
If Peek returns fewer than n bytes, it - * also returns an error explaining why the read is short. The error is - * [ErrBufferFull] if n is larger than b's buffer size. - * - * Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding - * until the next read operation. - */ - peek(n: number): string|Array - } - interface Reader { - /** - * Discard skips the next n bytes, returning the number of bytes discarded. - * - * If Discard skips fewer than n bytes, it also returns an error. - * If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without - * reading from the underlying io.Reader. - */ - discard(n: number): number - } - interface Reader { - /** - * Read reads data into p. - * It returns the number of bytes read into p. - * The bytes are taken from at most one Read on the underlying [Reader], - * hence n may be less than len(p). - * To read exactly len(p) bytes, use io.ReadFull(b, p). - * If the underlying [Reader] can return a non-zero count with io.EOF, - * then this Read method can do so as well; see the [io.Reader] docs. - */ - read(p: string|Array): number - } - interface Reader { - /** - * ReadByte reads and returns a single byte. - * If no byte is available, returns an error. - */ - readByte(): number - } - interface Reader { - /** - * UnreadByte unreads the last byte. Only the most recently read byte can be unread. - * - * UnreadByte returns an error if the most recent method called on the - * [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not - * considered read operations. - */ - unreadByte(): void - } - interface Reader { - /** - * ReadRune reads a single UTF-8 encoded Unicode character and returns the - * rune and its size in bytes. If the encoded rune is invalid, it consumes one byte - * and returns unicode.ReplacementChar (U+FFFD) with a size of 1. - */ - readRune(): [number, number] - } - interface Reader { - /** - * UnreadRune unreads the last rune. If the most recent method called on - * the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this - * regard it is stricter than [Reader.UnreadByte], which will unread the last byte - * from any read operation.) - */ - unreadRune(): void - } - interface Reader { - /** - * Buffered returns the number of bytes that can be read from the current buffer. - */ - buffered(): number - } - interface Reader { - /** - * ReadSlice reads until the first occurrence of delim in the input, - * returning a slice pointing at the bytes in the buffer. - * The bytes stop being valid at the next read. - * If ReadSlice encounters an error before finding a delimiter, - * it returns all the data in the buffer and the error itself (often io.EOF). - * ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim. - * Because the data returned from ReadSlice will be overwritten - * by the next I/O operation, most clients should use - * [Reader.ReadBytes] or ReadString instead. - * ReadSlice returns err != nil if and only if line does not end in delim. - */ - readSlice(delim: number): string|Array - } - interface Reader { - /** - * ReadLine is a low-level line-reading primitive. Most callers should use - * [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner]. - * - * ReadLine tries to return a single line, not including the end-of-line bytes. - * If the line was too long for the buffer then isPrefix is set and the - * beginning of the line is returned. 
The rest of the line will be returned - * from future calls. isPrefix will be false when returning the last fragment - * of the line. The returned buffer is only valid until the next call to - * ReadLine. ReadLine either returns a non-nil line or it returns an error, - * never both. - * - * The text returned from ReadLine does not include the line end ("\r\n" or "\n"). - * No indication or error is given if the input ends without a final line end. - * Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read - * (possibly a character belonging to the line end) even if that byte is not - * part of the line returned by ReadLine. - */ - readLine(): [string|Array, boolean] - } - interface Reader { - /** - * ReadBytes reads until the first occurrence of delim in the input, - * returning a slice containing the data up to and including the delimiter. - * If ReadBytes encounters an error before finding a delimiter, - * it returns the data read before the error and the error itself (often io.EOF). - * ReadBytes returns err != nil if and only if the returned data does not end in - * delim. - * For simple uses, a Scanner may be more convenient. - */ - readBytes(delim: number): string|Array - } - interface Reader { - /** - * ReadString reads until the first occurrence of delim in the input, - * returning a string containing the data up to and including the delimiter. - * If ReadString encounters an error before finding a delimiter, - * it returns the data read before the error and the error itself (often io.EOF). - * ReadString returns err != nil if and only if the returned data does not end in - * delim. - * For simple uses, a Scanner may be more convenient. - */ - readString(delim: number): string - } - interface Reader { - /** - * WriteTo implements io.WriterTo. - * This may make multiple calls to the [Reader.Read] method of the underlying [Reader]. - * If the underlying reader supports the [Reader.WriteTo] method, - * this calls the underlying [Reader.WriteTo] without buffering. - */ - writeTo(w: io.Writer): number - } - /** - * Writer implements buffering for an [io.Writer] object. - * If an error occurs writing to a [Writer], no more data will be - * accepted and all subsequent writes, and [Writer.Flush], will return the error. - * After all data has been written, the client should call the - * [Writer.Flush] method to guarantee all data has been forwarded to - * the underlying [io.Writer]. - */ - interface Writer { - } - interface Writer { - /** - * Size returns the size of the underlying buffer in bytes. - */ - size(): number - } - interface Writer { - /** - * Reset discards any unflushed buffered data, clears any error, and - * resets b to write its output to w. - * Calling Reset on the zero value of [Writer] initializes the internal buffer - * to the default size. - * Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing. - */ - reset(w: io.Writer): void - } - interface Writer { - /** - * Flush writes any buffered data to the underlying [io.Writer]. - */ - flush(): void - } - interface Writer { - /** - * Available returns how many bytes are unused in the buffer. - */ - available(): number - } - interface Writer { - /** - * AvailableBuffer returns an empty buffer with b.Available() capacity. - * This buffer is intended to be appended to and - * passed to an immediately succeeding [Writer.Write] call. - * The buffer is only valid until the next write operation on b. 
- */ - availableBuffer(): string|Array - } - interface Writer { - /** - * Buffered returns the number of bytes that have been written into the current buffer. - */ - buffered(): number - } - interface Writer { - /** - * Write writes the contents of p into the buffer. - * It returns the number of bytes written. - * If nn < len(p), it also returns an error explaining - * why the write is short. - */ - write(p: string|Array): number - } - interface Writer { - /** - * WriteByte writes a single byte. - */ - writeByte(c: number): void - } - interface Writer { - /** - * WriteRune writes a single Unicode code point, returning - * the number of bytes written and any error. - */ - writeRune(r: number): number - } - interface Writer { - /** - * WriteString writes a string. - * It returns the number of bytes written. - * If the count is less than len(s), it also returns an error explaining - * why the write is short. - */ - writeString(s: string): number - } - interface Writer { - /** - * ReadFrom implements [io.ReaderFrom]. If the underlying writer - * supports the ReadFrom method, this calls the underlying ReadFrom. - * If there is buffered data and an underlying ReadFrom, this fills - * the buffer and writes it before calling ReadFrom. - */ - readFrom(r: io.Reader): number - } -} - namespace net { /** * Addr represents a network end point address. @@ -21368,65 +21115,302 @@ namespace url { } } -namespace cobra { - interface PositionalArgs {(cmd: Command, args: Array): void } - // @ts-ignore - import flag = pflag +namespace jwt { /** - * FParseErrWhitelist configures Flag parse errors to be ignored + * NumericDate represents a JSON numeric date value, as referenced at + * https://datatracker.ietf.org/doc/html/rfc7519#section-2. */ - interface FParseErrWhitelist extends _TygojaAny{} - /** - * Group Structure to manage groups for commands - */ - interface Group { - id: string - title: string + type _siDBPCJ = time.Time + interface NumericDate extends _siDBPCJ { + } + interface NumericDate { + /** + * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch + * represented in NumericDate to a byte array, using the precision specified in TimePrecision. + */ + marshalJSON(): string|Array + } + interface NumericDate { + /** + * UnmarshalJSON is an implementation of the json.RawMessage interface and + * deserializes a [NumericDate] from a JSON representation, i.e. a + * [json.Number]. This number represents an UNIX epoch with either integer or + * non-integer seconds. + */ + unmarshalJSON(b: string|Array): void } /** - * CompletionOptions are the options to control shell completion + * ClaimStrings is basically just a slice of strings, but it can be either + * serialized from a string array or just a string. This type is necessary, + * since the "aud" claim can either be a single string or an array. 
*/ - interface CompletionOptions { - /** - * DisableDefaultCmd prevents Cobra from creating a default 'completion' command - */ - disableDefaultCmd: boolean - /** - * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - * for shells that support completion descriptions - */ - disableNoDescFlag: boolean - /** - * DisableDescriptions turns off all completion descriptions for shells - * that support them - */ - disableDescriptions: boolean - /** - * HiddenDefaultCmd makes the default 'completion' command hidden - */ - hiddenDefaultCmd: boolean + interface ClaimStrings extends Array{} + interface ClaimStrings { + unmarshalJSON(data: string|Array): void + } + interface ClaimStrings { + marshalJSON(): string|Array } - /** - * Completion is a string that can be used for completions - * - * two formats are supported: - * ``` - * - the completion choice - * - the completion choice with a textual description (separated by a TAB). - * ``` - * - * [CompletionWithDesc] can be used to create a completion string with a textual description. - * - * Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. - */ - interface Completion extends String{} - /** - * CompletionFunc is a function that provides completion results. - */ - interface CompletionFunc {(cmd: Command, args: Array, toComplete: string): [Array, ShellCompDirective] } } -namespace types { +namespace bufio { + /** + * Reader implements buffering for an io.Reader object. + */ + interface Reader { + } + interface Reader { + /** + * Size returns the size of the underlying buffer in bytes. + */ + size(): number + } + interface Reader { + /** + * Reset discards any buffered data, resets all state, and switches + * the buffered reader to read from r. + * Calling Reset on the zero value of [Reader] initializes the internal buffer + * to the default size. + * Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing. + */ + reset(r: io.Reader): void + } + interface Reader { + /** + * Peek returns the next n bytes without advancing the reader. The bytes stop + * being valid at the next read call. If Peek returns fewer than n bytes, it + * also returns an error explaining why the read is short. The error is + * [ErrBufferFull] if n is larger than b's buffer size. + * + * Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding + * until the next read operation. + */ + peek(n: number): string|Array + } + interface Reader { + /** + * Discard skips the next n bytes, returning the number of bytes discarded. + * + * If Discard skips fewer than n bytes, it also returns an error. + * If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without + * reading from the underlying io.Reader. + */ + discard(n: number): number + } + interface Reader { + /** + * Read reads data into p. + * It returns the number of bytes read into p. + * The bytes are taken from at most one Read on the underlying [Reader], + * hence n may be less than len(p). + * To read exactly len(p) bytes, use io.ReadFull(b, p). + * If the underlying [Reader] can return a non-zero count with io.EOF, + * then this Read method can do so as well; see the [io.Reader] docs. + */ + read(p: string|Array): number + } + interface Reader { + /** + * ReadByte reads and returns a single byte. + * If no byte is available, returns an error. + */ + readByte(): number + } + interface Reader { + /** + * UnreadByte unreads the last byte. Only the most recently read byte can be unread. 
+ * + * UnreadByte returns an error if the most recent method called on the + * [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not + * considered read operations. + */ + unreadByte(): void + } + interface Reader { + /** + * ReadRune reads a single UTF-8 encoded Unicode character and returns the + * rune and its size in bytes. If the encoded rune is invalid, it consumes one byte + * and returns unicode.ReplacementChar (U+FFFD) with a size of 1. + */ + readRune(): [number, number] + } + interface Reader { + /** + * UnreadRune unreads the last rune. If the most recent method called on + * the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this + * regard it is stricter than [Reader.UnreadByte], which will unread the last byte + * from any read operation.) + */ + unreadRune(): void + } + interface Reader { + /** + * Buffered returns the number of bytes that can be read from the current buffer. + */ + buffered(): number + } + interface Reader { + /** + * ReadSlice reads until the first occurrence of delim in the input, + * returning a slice pointing at the bytes in the buffer. + * The bytes stop being valid at the next read. + * If ReadSlice encounters an error before finding a delimiter, + * it returns all the data in the buffer and the error itself (often io.EOF). + * ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim. + * Because the data returned from ReadSlice will be overwritten + * by the next I/O operation, most clients should use + * [Reader.ReadBytes] or ReadString instead. + * ReadSlice returns err != nil if and only if line does not end in delim. + */ + readSlice(delim: number): string|Array + } + interface Reader { + /** + * ReadLine is a low-level line-reading primitive. Most callers should use + * [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner]. + * + * ReadLine tries to return a single line, not including the end-of-line bytes. + * If the line was too long for the buffer then isPrefix is set and the + * beginning of the line is returned. The rest of the line will be returned + * from future calls. isPrefix will be false when returning the last fragment + * of the line. The returned buffer is only valid until the next call to + * ReadLine. ReadLine either returns a non-nil line or it returns an error, + * never both. + * + * The text returned from ReadLine does not include the line end ("\r\n" or "\n"). + * No indication or error is given if the input ends without a final line end. + * Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read + * (possibly a character belonging to the line end) even if that byte is not + * part of the line returned by ReadLine. + */ + readLine(): [string|Array, boolean] + } + interface Reader { + /** + * ReadBytes reads until the first occurrence of delim in the input, + * returning a slice containing the data up to and including the delimiter. + * If ReadBytes encounters an error before finding a delimiter, + * it returns the data read before the error and the error itself (often io.EOF). + * ReadBytes returns err != nil if and only if the returned data does not end in + * delim. + * For simple uses, a Scanner may be more convenient. + */ + readBytes(delim: number): string|Array + } + interface Reader { + /** + * ReadString reads until the first occurrence of delim in the input, + * returning a string containing the data up to and including the delimiter. 
+ * If ReadString encounters an error before finding a delimiter, + * it returns the data read before the error and the error itself (often io.EOF). + * ReadString returns err != nil if and only if the returned data does not end in + * delim. + * For simple uses, a Scanner may be more convenient. + */ + readString(delim: number): string + } + interface Reader { + /** + * WriteTo implements io.WriterTo. + * This may make multiple calls to the [Reader.Read] method of the underlying [Reader]. + * If the underlying reader supports the [Reader.WriteTo] method, + * this calls the underlying [Reader.WriteTo] without buffering. + */ + writeTo(w: io.Writer): number + } + /** + * Writer implements buffering for an [io.Writer] object. + * If an error occurs writing to a [Writer], no more data will be + * accepted and all subsequent writes, and [Writer.Flush], will return the error. + * After all data has been written, the client should call the + * [Writer.Flush] method to guarantee all data has been forwarded to + * the underlying [io.Writer]. + */ + interface Writer { + } + interface Writer { + /** + * Size returns the size of the underlying buffer in bytes. + */ + size(): number + } + interface Writer { + /** + * Reset discards any unflushed buffered data, clears any error, and + * resets b to write its output to w. + * Calling Reset on the zero value of [Writer] initializes the internal buffer + * to the default size. + * Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing. + */ + reset(w: io.Writer): void + } + interface Writer { + /** + * Flush writes any buffered data to the underlying [io.Writer]. + */ + flush(): void + } + interface Writer { + /** + * Available returns how many bytes are unused in the buffer. + */ + available(): number + } + interface Writer { + /** + * AvailableBuffer returns an empty buffer with b.Available() capacity. + * This buffer is intended to be appended to and + * passed to an immediately succeeding [Writer.Write] call. + * The buffer is only valid until the next write operation on b. + */ + availableBuffer(): string|Array + } + interface Writer { + /** + * Buffered returns the number of bytes that have been written into the current buffer. + */ + buffered(): number + } + interface Writer { + /** + * Write writes the contents of p into the buffer. + * It returns the number of bytes written. + * If nn < len(p), it also returns an error explaining + * why the write is short. + */ + write(p: string|Array): number + } + interface Writer { + /** + * WriteByte writes a single byte. + */ + writeByte(c: number): void + } + interface Writer { + /** + * WriteRune writes a single Unicode code point, returning + * the number of bytes written and any error. + */ + writeRune(r: number): number + } + interface Writer { + /** + * WriteString writes a string. + * It returns the number of bytes written. + * If the count is less than len(s), it also returns an error explaining + * why the write is short. + */ + writeString(s: string): number + } + interface Writer { + /** + * ReadFrom implements [io.ReaderFrom]. If the underlying writer + * supports the ReadFrom method, this calls the underlying ReadFrom. + * If there is buffered data and an underlying ReadFrom, this fills + * the buffer and writes it before calling ReadFrom. + */ + readFrom(r: io.Reader): number + } } /** @@ -21500,6 +21484,303 @@ namespace textproto { } } +namespace subscriptions { +} + +namespace cron { + /** + * Job defines a single registered cron job. 
+ */ + interface Job { + } + interface Job { + /** + * Id returns the cron job id. + */ + id(): string + } + interface Job { + /** + * Expression returns the plain cron job schedule expression. + */ + expression(): string + } + interface Job { + /** + * Run runs the cron job function. + */ + run(): void + } + interface Job { + /** + * MarshalJSON implements [json.Marshaler] and export the current + * jobs data into valid JSON. + */ + marshalJSON(): string|Array + } +} + +namespace cobra { + interface PositionalArgs {(cmd: Command, args: Array): void } + // @ts-ignore + import flag = pflag + /** + * FParseErrWhitelist configures Flag parse errors to be ignored + */ + interface FParseErrWhitelist extends _TygojaAny{} + /** + * Group Structure to manage groups for commands + */ + interface Group { + id: string + title: string + } + /** + * CompletionOptions are the options to control shell completion + */ + interface CompletionOptions { + /** + * DisableDefaultCmd prevents Cobra from creating a default 'completion' command + */ + disableDefaultCmd: boolean + /** + * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + * for shells that support completion descriptions + */ + disableNoDescFlag: boolean + /** + * DisableDescriptions turns off all completion descriptions for shells + * that support them + */ + disableDescriptions: boolean + /** + * HiddenDefaultCmd makes the default 'completion' command hidden + */ + hiddenDefaultCmd: boolean + } + /** + * Completion is a string that can be used for completions + * + * two formats are supported: + * ``` + * - the completion choice + * - the completion choice with a textual description (separated by a TAB). + * ``` + * + * [CompletionWithDesc] can be used to create a completion string with a textual description. + * + * Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. + */ + interface Completion extends String{} + /** + * CompletionFunc is a function that provides completion results. + */ + interface CompletionFunc {(cmd: Command, args: Array, toComplete: string): [Array, ShellCompDirective] } +} + +namespace sql { + /** + * IsolationLevel is the transaction isolation level used in [TxOptions]. + */ + interface IsolationLevel extends Number{} + interface IsolationLevel { + /** + * String returns the name of the transaction isolation level. + */ + string(): string + } + /** + * DBStats contains database statistics. + */ + interface DBStats { + maxOpenConnections: number // Maximum number of open connections to the database. + /** + * Pool Status + */ + openConnections: number // The number of established connections both in use and idle. + inUse: number // The number of connections currently in use. + idle: number // The number of idle connections. + /** + * Counters + */ + waitCount: number // The total number of connections waited for. + waitDuration: time.Duration // The total time blocked waiting for a new connection. + maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. + maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. + maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. + } + /** + * Conn represents a single database connection rather than a pool of database + * connections. Prefer running queries from [DB] unless there is a specific + * need for a continuous single database connection. 
+ * + * A Conn must call [Conn.Close] to return the connection to the database pool + * and may do so concurrently with a running query. + * + * After a call to [Conn.Close], all operations on the + * connection fail with [ErrConnDone]. + */ + interface Conn { + } + interface Conn { + /** + * PingContext verifies the connection to the database is still alive. + */ + pingContext(ctx: context.Context): void + } + interface Conn { + /** + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Conn { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Conn { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * the [*Row.Scan] method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Conn { + /** + * PrepareContext creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Conn { + /** + * Raw executes f exposing the underlying driver connection for the + * duration of f. The driverConn must not be used outside of f. + * + * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable + * until [Conn.Close] is called. + */ + raw(f: (driverConn: any) => void): void + } + interface Conn { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface Conn { + /** + * Close returns the connection to the connection pool. + * All operations after a Close will return with [ErrConnDone]. + * Close is safe to call concurrently with other operations and will + * block until all other operations finish. It may be useful to first + * cancel any used context and then call close directly after. + */ + close(): void + } + /** + * ColumnType contains the name and type of a column. + */ + interface ColumnType { + } + interface ColumnType { + /** + * Name returns the name or alias of the column. + */ + name(): string + } + interface ColumnType { + /** + * Length returns the column type length for variable length column types such + * as text and binary field types. 
If the type length is unbounded the value will + * be [math.MaxInt64] (any database limits will still apply). + * If the column type is not variable length, such as an int, or if not supported + * by the driver ok is false. + */ + length(): [number, boolean] + } + interface ColumnType { + /** + * DecimalSize returns the scale and precision of a decimal type. + * If not applicable or if not supported ok is false. + */ + decimalSize(): [number, number, boolean] + } + interface ColumnType { + /** + * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. + * If a driver does not support this property ScanType will return + * the type of an empty interface. + */ + scanType(): any + } + interface ColumnType { + /** + * Nullable reports whether the column may be null. + * If a driver does not support this property ok will be false. + */ + nullable(): [boolean, boolean] + } + interface ColumnType { + /** + * DatabaseTypeName returns the database system name of the column type. If an empty + * string is returned, then the driver type name is not supported. + * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers + * are not included. + * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", + * "INT", and "BIGINT". + */ + databaseTypeName(): string + } + /** + * Row is the result of calling [DB.QueryRow] to select a single row. + */ + interface Row { + } + interface Row { + /** + * Scan copies the columns from the matched row into the values + * pointed at by dest. See the documentation on [Rows.Scan] for details. + * If more than one row matches the query, + * Scan uses the first row and discards the rest. If no row matches + * the query, Scan returns [ErrNoRows]. + */ + scan(...dest: any[]): void + } + interface Row { + /** + * Err provides a way for wrapping packages to check for + * query errors without calling [Row.Scan]. + * Err returns the error, if any, that was encountered while running the query. + * If this error is not nil, this error will also be returned from [Row.Scan]. + */ + err(): void + } +} + namespace multipart { interface Reader { /** @@ -21849,6 +22130,34 @@ namespace http { } } +namespace hook { + /** + * wrapped local Hook embedded struct to limit the public API surface. + */ + type _sYXVSgK = Hook + interface mainHook extends _sYXVSgK { + } +} + +namespace types { +} + +namespace search { +} + +namespace router { + // @ts-ignore + import validation = ozzo_validation + /** + * RouterGroup represents a collection of routes and other sub groups + * that share common pattern prefix and middlewares. + */ + interface RouterGroup { + prefix: string + middlewares: Array<(hook.Handler | undefined)> + } +} + /** * Package oauth2 provides support for making * OAuth2 authorized and authenticated HTTP requests, @@ -21952,308 +22261,6 @@ namespace oauth2 { } } -namespace cron { - /** - * Job defines a single registered cron job. - */ - interface Job { - } - interface Job { - /** - * Id returns the cron job id. - */ - id(): string - } - interface Job { - /** - * Expression returns the plain cron job schedule expression. - */ - expression(): string - } - interface Job { - /** - * Run runs the cron job function. - */ - run(): void - } - interface Job { - /** - * MarshalJSON implements [json.Marshaler] and export the current - * jobs data into valid JSON. 
- */ - marshalJSON(): string|Array - } -} - -namespace jwt { - /** - * NumericDate represents a JSON numeric date value, as referenced at - * https://datatracker.ietf.org/doc/html/rfc7519#section-2. - */ - type _sydczrO = time.Time - interface NumericDate extends _sydczrO { - } - interface NumericDate { - /** - * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch - * represented in NumericDate to a byte array, using the precision specified in TimePrecision. - */ - marshalJSON(): string|Array - } - interface NumericDate { - /** - * UnmarshalJSON is an implementation of the json.RawMessage interface and - * deserializes a [NumericDate] from a JSON representation, i.e. a - * [json.Number]. This number represents an UNIX epoch with either integer or - * non-integer seconds. - */ - unmarshalJSON(b: string|Array): void - } - /** - * ClaimStrings is basically just a slice of strings, but it can be either - * serialized from a string array or just a string. This type is necessary, - * since the "aud" claim can either be a single string or an array. - */ - interface ClaimStrings extends Array{} - interface ClaimStrings { - unmarshalJSON(data: string|Array): void - } - interface ClaimStrings { - marshalJSON(): string|Array - } -} - -namespace subscriptions { -} - -namespace sql { - /** - * IsolationLevel is the transaction isolation level used in [TxOptions]. - */ - interface IsolationLevel extends Number{} - interface IsolationLevel { - /** - * String returns the name of the transaction isolation level. - */ - string(): string - } - /** - * DBStats contains database statistics. - */ - interface DBStats { - maxOpenConnections: number // Maximum number of open connections to the database. - /** - * Pool Status - */ - openConnections: number // The number of established connections both in use and idle. - inUse: number // The number of connections currently in use. - idle: number // The number of idle connections. - /** - * Counters - */ - waitCount: number // The total number of connections waited for. - waitDuration: time.Duration // The total time blocked waiting for a new connection. - maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. - maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. - maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. - } - /** - * Conn represents a single database connection rather than a pool of database - * connections. Prefer running queries from [DB] unless there is a specific - * need for a continuous single database connection. - * - * A Conn must call [Conn.Close] to return the connection to the database pool - * and may do so concurrently with a running query. - * - * After a call to [Conn.Close], all operations on the - * connection fail with [ErrConnDone]. - */ - interface Conn { - } - interface Conn { - /** - * PingContext verifies the connection to the database is still alive. - */ - pingContext(ctx: context.Context): void - } - interface Conn { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Conn { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. 
- */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Conn { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * the [*Row.Scan] method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Conn { - /** - * PrepareContext creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface Conn { - /** - * Raw executes f exposing the underlying driver connection for the - * duration of f. The driverConn must not be used outside of f. - * - * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable - * until [Conn.Close] is called. - */ - raw(f: (driverConn: any) => void): void - } - interface Conn { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. - */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) - } - interface Conn { - /** - * Close returns the connection to the connection pool. - * All operations after a Close will return with [ErrConnDone]. - * Close is safe to call concurrently with other operations and will - * block until all other operations finish. It may be useful to first - * cancel any used context and then call close directly after. - */ - close(): void - } - /** - * ColumnType contains the name and type of a column. - */ - interface ColumnType { - } - interface ColumnType { - /** - * Name returns the name or alias of the column. - */ - name(): string - } - interface ColumnType { - /** - * Length returns the column type length for variable length column types such - * as text and binary field types. If the type length is unbounded the value will - * be [math.MaxInt64] (any database limits will still apply). - * If the column type is not variable length, such as an int, or if not supported - * by the driver ok is false. - */ - length(): [number, boolean] - } - interface ColumnType { - /** - * DecimalSize returns the scale and precision of a decimal type. - * If not applicable or if not supported ok is false. - */ - decimalSize(): [number, number, boolean] - } - interface ColumnType { - /** - * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. - * If a driver does not support this property ScanType will return - * the type of an empty interface. - */ - scanType(): any - } - interface ColumnType { - /** - * Nullable reports whether the column may be null. - * If a driver does not support this property ok will be false. 
- */ - nullable(): [boolean, boolean] - } - interface ColumnType { - /** - * DatabaseTypeName returns the database system name of the column type. If an empty - * string is returned, then the driver type name is not supported. - * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers - * are not included. - * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", - * "INT", and "BIGINT". - */ - databaseTypeName(): string - } - /** - * Row is the result of calling [DB.QueryRow] to select a single row. - */ - interface Row { - } - interface Row { - /** - * Scan copies the columns from the matched row into the values - * pointed at by dest. See the documentation on [Rows.Scan] for details. - * If more than one row matches the query, - * Scan uses the first row and discards the rest. If no row matches - * the query, Scan returns [ErrNoRows]. - */ - scan(...dest: any[]): void - } - interface Row { - /** - * Err provides a way for wrapping packages to check for - * query errors without calling [Row.Scan]. - * Err returns the error, if any, that was encountered while running the query. - * If this error is not nil, this error will also be returned from [Row.Scan]. - */ - err(): void - } -} - -namespace search { -} - -namespace hook { - /** - * wrapped local Hook embedded struct to limit the public API surface. - */ - type _sQrptzS = Hook - interface mainHook extends _sQrptzS { - } -} - -namespace router { - // @ts-ignore - import validation = ozzo_validation - /** - * RouterGroup represents a collection of routes and other sub groups - * that share common pattern prefix and middlewares. - */ - interface RouterGroup { - prefix: string - middlewares: Array<(hook.Handler | undefined)> - } -} - namespace slog { /** * An Attr is a key-value pair. @@ -22450,16 +22457,6 @@ namespace url { } } -namespace cobra { - // @ts-ignore - import flag = pflag - /** - * ShellCompDirective is a bit map representing the different behaviors the shell - * can be instructed to have once completions have been provided. - */ - interface ShellCompDirective extends Number{} -} - namespace multipart { /** * A Part represents a single part in a multipart body. @@ -22687,6 +22684,16 @@ namespace slog { } } +namespace cobra { + // @ts-ignore + import flag = pflag + /** + * ShellCompDirective is a bit map representing the different behaviors the shell + * can be instructed to have once completions have been provided. + */ + interface ShellCompDirective extends Number{} +} + namespace slog { // @ts-ignore import loginternal = internal